From 3b8abb3239530c423c0b97e42af7f7e856e1ee96 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 2 May 2023 09:08:38 -0700 Subject: mm: kmem: fix a NULL pointer dereference in obj_stock_flush_required() KCSAN found an issue in obj_stock_flush_required(): stock->cached_objcg can be reset between the check and dereference: ================================================================== BUG: KCSAN: data-race in drain_all_stock / drain_obj_stock write to 0xffff888237c2a2f8 of 8 bytes by task 19625 on cpu 0: drain_obj_stock+0x408/0x4e0 mm/memcontrol.c:3306 refill_obj_stock+0x9c/0x1e0 mm/memcontrol.c:3340 obj_cgroup_uncharge+0xe/0x10 mm/memcontrol.c:3408 memcg_slab_free_hook mm/slab.h:587 [inline] __cache_free mm/slab.c:3373 [inline] __do_kmem_cache_free mm/slab.c:3577 [inline] kmem_cache_free+0x105/0x280 mm/slab.c:3602 __d_free fs/dcache.c:298 [inline] dentry_free fs/dcache.c:375 [inline] __dentry_kill+0x422/0x4a0 fs/dcache.c:621 dentry_kill+0x8d/0x1e0 dput+0x118/0x1f0 fs/dcache.c:913 __fput+0x3bf/0x570 fs/file_table.c:329 ____fput+0x15/0x20 fs/file_table.c:349 task_work_run+0x123/0x160 kernel/task_work.c:179 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline] exit_to_user_mode_loop+0xcf/0xe0 kernel/entry/common.c:171 exit_to_user_mode_prepare+0x6a/0xa0 kernel/entry/common.c:203 __syscall_exit_to_user_mode_work kernel/entry/common.c:285 [inline] syscall_exit_to_user_mode+0x26/0x140 kernel/entry/common.c:296 do_syscall_64+0x4d/0xc0 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x63/0xcd read to 0xffff888237c2a2f8 of 8 bytes by task 19632 on cpu 1: obj_stock_flush_required mm/memcontrol.c:3319 [inline] drain_all_stock+0x174/0x2a0 mm/memcontrol.c:2361 try_charge_memcg+0x6d0/0xd10 mm/memcontrol.c:2703 try_charge mm/memcontrol.c:2837 [inline] mem_cgroup_charge_skmem+0x51/0x140 mm/memcontrol.c:7290 sock_reserve_memory+0xb1/0x390 net/core/sock.c:1025 sk_setsockopt+0x800/0x1e70 net/core/sock.c:1525 udp_lib_setsockopt+0x99/0x6c0 net/ipv4/udp.c:2692 udp_setsockopt+0x73/0xa0 net/ipv4/udp.c:2817 sock_common_setsockopt+0x61/0x70 net/core/sock.c:3668 __sys_setsockopt+0x1c3/0x230 net/socket.c:2271 __do_sys_setsockopt net/socket.c:2282 [inline] __se_sys_setsockopt net/socket.c:2279 [inline] __x64_sys_setsockopt+0x66/0x80 net/socket.c:2279 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x41/0xc0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd value changed: 0xffff8881382d52c0 -> 0xffff888138893740 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 19632 Comm: syz-executor.0 Not tainted 6.3.0-rc2-syzkaller-00387-g534293368afa #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 03/02/2023 Fix it by using READ_ONCE()/WRITE_ONCE() for all accesses to stock->cached_objcg. 
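For readers unfamiliar with the pattern, here is a minimal userspace sketch of the bug class and the fix (this is not kernel code; the struct is trimmed down and the volatile-cast macros below only approximate the kernel's READ_ONCE()/WRITE_ONCE()). It shows why a lockless reader must load the shared pointer into a local exactly once before checking and dereferencing it; a single-threaded main() merely exercises the two variants, the actual race needs a concurrent writer clearing the pointer.

/*
 * Illustrative sketch of the check-then-dereference race fixed above.
 */
#include <stdio.h>

#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

struct obj_cgroup { int id; };

struct memcg_stock_pcp {
	struct obj_cgroup *cached_objcg;	/* may be cleared by another CPU */
};

/* Racy: the pointer can become NULL between the check and the dereference. */
static int objcg_id_racy(struct memcg_stock_pcp *stock)
{
	if (stock->cached_objcg)
		return stock->cached_objcg->id;	/* possible NULL dereference */
	return -1;
}

/* Fixed: read the shared pointer once, then use only the local copy. */
static int objcg_id_fixed(struct memcg_stock_pcp *stock)
{
	struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg);

	return objcg ? objcg->id : -1;
}

int main(void)
{
	struct obj_cgroup objcg = { .id = 42 };
	struct memcg_stock_pcp stock = { .cached_objcg = &objcg };

	printf("%d %d\n", objcg_id_racy(&stock), objcg_id_fixed(&stock));
	WRITE_ONCE(stock.cached_objcg, NULL);
	printf("%d %d\n", objcg_id_racy(&stock), objcg_id_fixed(&stock));
	return 0;
}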
Link: https://lkml.kernel.org/r/20230502160839.361544-1-roman.gushchin@linux.dev Fixes: bf4f059954dc ("mm: memcg/slab: obj_cgroup API") Signed-off-by: Roman Gushchin Reported-by: syzbot+774c29891415ab0fd29d@syzkaller.appspotmail.com Reported-by: Dmitry Vyukov Link: https://lore.kernel.org/linux-mm/CACT4Y+ZfucZhM60YPphWiCLJr6+SGFhT+jjm8k1P-a_8Kkxsjg@mail.gmail.com/T/#t Reviewed-by: Yosry Ahmed Acked-by: Shakeel Butt Reviewed-by: Dmitry Vyukov Signed-off-by: Andrew Morton --- mm/memcontrol.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4b27e245a055..c823c35c2ed4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3208,12 +3208,12 @@ void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat, * accumulating over a page of vmstat data or when pgdat or idx * changes. */ - if (stock->cached_objcg != objcg) { + if (READ_ONCE(stock->cached_objcg) != objcg) { old = drain_obj_stock(stock); obj_cgroup_get(objcg); stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) ? atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; - stock->cached_objcg = objcg; + WRITE_ONCE(stock->cached_objcg, objcg); stock->cached_pgdat = pgdat; } else if (stock->cached_pgdat != pgdat) { /* Flush the existing cached vmstat data */ @@ -3267,7 +3267,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (objcg == stock->cached_objcg && stock->nr_bytes >= nr_bytes) { + if (objcg == READ_ONCE(stock->cached_objcg) && stock->nr_bytes >= nr_bytes) { stock->nr_bytes -= nr_bytes; ret = true; } @@ -3279,7 +3279,7 @@ static bool consume_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes) static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) { - struct obj_cgroup *old = stock->cached_objcg; + struct obj_cgroup *old = READ_ONCE(stock->cached_objcg); if (!old) return NULL; @@ -3332,7 +3332,7 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) stock->cached_pgdat = NULL; } - stock->cached_objcg = NULL; + WRITE_ONCE(stock->cached_objcg, NULL); /* * The `old' objects needs to be released by the caller via * obj_cgroup_put() outside of memcg_stock_pcp::stock_lock. @@ -3343,10 +3343,11 @@ static struct obj_cgroup *drain_obj_stock(struct memcg_stock_pcp *stock) static bool obj_stock_flush_required(struct memcg_stock_pcp *stock, struct mem_cgroup *root_memcg) { + struct obj_cgroup *objcg = READ_ONCE(stock->cached_objcg); struct mem_cgroup *memcg; - if (stock->cached_objcg) { - memcg = obj_cgroup_memcg(stock->cached_objcg); + if (objcg) { + memcg = obj_cgroup_memcg(objcg); if (memcg && mem_cgroup_is_descendant(memcg, root_memcg)) return true; } @@ -3365,10 +3366,10 @@ static void refill_obj_stock(struct obj_cgroup *objcg, unsigned int nr_bytes, local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (stock->cached_objcg != objcg) { /* reset if necessary */ + if (READ_ONCE(stock->cached_objcg) != objcg) { /* reset if necessary */ old = drain_obj_stock(stock); obj_cgroup_get(objcg); - stock->cached_objcg = objcg; + WRITE_ONCE(stock->cached_objcg, objcg); stock->nr_bytes = atomic_read(&objcg->nr_charged_bytes) ? 
atomic_xchg(&objcg->nr_charged_bytes, 0) : 0; allow_uncharge = true; /* Allow uncharge when objcg changes */ -- cgit v1.2.3 From f785a8f21a9cc46fced9f53c51a6f2dc647ed484 Mon Sep 17 00:00:00 2001 From: Roman Gushchin Date: Tue, 2 May 2023 09:08:39 -0700 Subject: mm: memcg: use READ_ONCE()/WRITE_ONCE() to access stock->cached A memcg pointer in the percpu stock can be accessed by drain_all_stock() from another cpu in a lockless way. In theory it might lead to an issue, similar to the one which has been discovered with stock->cached_objcg, where the pointer was zeroed between the check for being NULL and dereferencing. In this case the issue is unlikely a real problem, but to make it bulletproof and similar to stock->cached_objcg, let's annotate all accesses to stock->cached with READ_ONCE()/WTRITE_ONCE(). Link: https://lkml.kernel.org/r/20230502160839.361544-2-roman.gushchin@linux.dev Signed-off-by: Roman Gushchin Acked-by: Shakeel Butt Cc: Dmitry Vyukov Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/memcontrol.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c823c35c2ed4..1e364ad495a3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2275,7 +2275,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) local_lock_irqsave(&memcg_stock.stock_lock, flags); stock = this_cpu_ptr(&memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) { + if (memcg == READ_ONCE(stock->cached) && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; ret = true; } @@ -2290,7 +2290,7 @@ static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) */ static void drain_stock(struct memcg_stock_pcp *stock) { - struct mem_cgroup *old = stock->cached; + struct mem_cgroup *old = READ_ONCE(stock->cached); if (!old) return; @@ -2303,7 +2303,7 @@ static void drain_stock(struct memcg_stock_pcp *stock) } css_put(&old->css); - stock->cached = NULL; + WRITE_ONCE(stock->cached, NULL); } static void drain_local_stock(struct work_struct *dummy) @@ -2338,10 +2338,10 @@ static void __refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) struct memcg_stock_pcp *stock; stock = this_cpu_ptr(&memcg_stock); - if (stock->cached != memcg) { /* reset if necessary */ + if (READ_ONCE(stock->cached) != memcg) { /* reset if necessary */ drain_stock(stock); css_get(&memcg->css); - stock->cached = memcg; + WRITE_ONCE(stock->cached, memcg); } stock->nr_pages += nr_pages; @@ -2383,7 +2383,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) bool flush = false; rcu_read_lock(); - memcg = stock->cached; + memcg = READ_ONCE(stock->cached); if (memcg && stock->nr_pages && mem_cgroup_is_descendant(memcg, root_memcg)) flush = true; -- cgit v1.2.3 From 4bb6dc79d987b243d65c70c5029e51e719cfb94b Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Fri, 28 Apr 2023 13:54:38 -0700 Subject: migrate_pages: avoid blocking for IO in MIGRATE_SYNC_LIGHT The MIGRATE_SYNC_LIGHT mode is intended to block for things that will finish quickly but not for things that will take a long time. Exactly how long is too long is not well defined, but waits of tens of milliseconds is likely non-ideal. When putting a Chromebook under memory pressure (opening over 90 tabs on a 4GB machine) it was fairly easy to see delays waiting for some locks in the kcompactd code path of > 100 ms. 
While the laptop wasn't amazingly usable in this state, it was still limping along and this state isn't something artificial. Sometimes we simply end up with a lot of memory pressure. Putting the same Chromebook under memory pressure while it was running Android apps (though not stressing them) showed a much worse result (NOTE: this was on an older kernel but the codepaths here are similar). Android apps on ChromeOS currently run from a 128K-block, zlib-compressed, loopback-mounted squashfs disk. If we get a page fault from something backed by the squashfs filesystem we could end up holding a folio lock while reading enough from disk to decompress 128K (and then decompressing it using the somewhat slow zlib algorithms). That reading goes through the ext4 subsystem (because it's a loopback mount) before eventually ending up in the block subsystem. This extra jaunt adds extra overhead. Without much work I could see cases where we ended up blocked on a folio lock for over a second. With more extreme memory pressure I could see up to 25 seconds. We considered adding a timeout in the case of MIGRATE_SYNC_LIGHT for the two locks that were seen to be slow [1] and that generated much discussion. After discussion, it was decided that we should avoid waiting for the two locks during MIGRATE_SYNC_LIGHT if they were being held for IO. We'll continue with the unbounded wait for the more full SYNC modes. With this change, I couldn't see any slow waits on these locks with my previous testcases. NOTE: The reason I started digging into this originally isn't because some benchmark had gone awry, but because we've received in-the-field crash reports where we have a hung task waiting on the page lock (which is the equivalent code path on old kernels). While the root cause of those crashes is likely unrelated and won't be fixed by this patch, analyzing those crash reports did point out that these very long waits seemed like something good to fix. With this patch we should no longer hang waiting on these locks, but presumably the system will still be in a bad shape and hang somewhere else. [1] https://lore.kernel.org/r/20230421151135.v2.1.I2b71e11264c5c214bc59744b9e13e4c353bc5714@changeid Link: https://lkml.kernel.org/r/20230428135414.v3.1.Ia86ccac02a303154a0b8bc60567e7a95d34c96d3@changeid Signed-off-by: Douglas Anderson Suggested-by: Matthew Wilcox Reviewed-by: Matthew Wilcox (Oracle) Acked-by: Mel Gorman Cc: Hillf Danton Cc: Gao Xiang Cc: Alexander Viro Cc: Christian Brauner Cc: Gao Xiang Cc: Huang Ying Cc: Vlastimil Babka Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/migrate.c | 49 ++++++++++++++++++++++++++----------------------- 1 file changed, 26 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 01cac26a3127..43d818fd1afd 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -692,37 +692,32 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, enum migrate_mode mode) { struct buffer_head *bh = head; + struct buffer_head *failed_bh; - /* Simple case, sync compaction */ - if (mode != MIGRATE_ASYNC) { - do { - lock_buffer(bh); - bh = bh->b_this_page; - - } while (bh != head); - - return true; - } - - /* async case, we cannot block on lock_buffer so use trylock_buffer */ do { if (!trylock_buffer(bh)) { - /* - * We failed to lock the buffer and cannot stall in - * async migration.
Release the taken locks - */ - struct buffer_head *failed_bh = bh; - bh = head; - while (bh != failed_bh) { - unlock_buffer(bh); - bh = bh->b_this_page; - } - return false; + if (mode == MIGRATE_ASYNC) + goto unlock; + if (mode == MIGRATE_SYNC_LIGHT && !buffer_uptodate(bh)) + goto unlock; + lock_buffer(bh); } bh = bh->b_this_page; } while (bh != head); + return true; + +unlock: + /* We failed to lock the buffer and cannot stall. */ + failed_bh = bh; + bh = head; + while (bh != failed_bh) { + unlock_buffer(bh); + bh = bh->b_this_page; + } + + return false; } static int __buffer_migrate_folio(struct address_space *mapping, @@ -1156,6 +1151,14 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page if (current->flags & PF_MEMALLOC) goto out; + /* + * In "light" mode, we can wait for transient locks (eg + * inserting a page into the page table), but it's not + * worth waiting for I/O. + */ + if (mode == MIGRATE_SYNC_LIGHT && !folio_test_uptodate(src)) + goto out; + folio_lock(src); } locked = true; -- cgit v1.2.3 From 5b42360c73b0679505dac6ec44d234cfec61120c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 28 Apr 2023 13:24:05 +0000 Subject: memcg: use seq_buf_do_printk() with mem_cgroup_print_oom_meminfo() Currently, we format all the memcg stats into a buffer in mem_cgroup_print_oom_meminfo() and use pr_info() to dump it to the logs. However, this buffer is large in size. Although it is currently working as intended, there is a dependency between the memcg stats buffer and the printk record size limit. If we add more stats in the future and the buffer becomes larger than the printk record size limit, or if the printk record size limit is reduced, the logs may be truncated. It is safer to use seq_buf_do_printk(), which will automatically break up the buffer at line breaks and issue small printk() calls. Refactor the code to move the seq_buf from memory_stat_format() to its callers, and use seq_buf_do_printk() to print the seq_buf in mem_cgroup_print_oom_meminfo(). Link: https://lkml.kernel.org/r/20230428132406.2540811-2-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Michal Hocko Reviewed-by: Sergey Senozhatsky Acked-by: Shakeel Butt Reviewed-by: Muchun Song Cc: Johannes Weiner Cc: Michal Hocko Cc: Petr Mladek Cc: Roman Gushchin Cc: Steven Rostedt (Google) Signed-off-by: Andrew Morton --- mm/memcontrol.c | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 1e364ad495a3..a79a47f7fc20 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1580,13 +1580,10 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, return memcg_page_state(memcg, item) * memcg_page_state_unit(item); } -static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) +static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { - struct seq_buf s; int i; - seq_buf_init(&s, buf, bufsize); - /* * Provide statistics on the state of the memory subsystem as * well as cumulative event counters that show past behavior.
@@ -1603,21 +1600,21 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) u64 size; size = memcg_page_state_output(memcg, memory_stats[i].idx); - seq_buf_printf(&s, "%s %llu\n", memory_stats[i].name, size); + seq_buf_printf(s, "%s %llu\n", memory_stats[i].name, size); if (unlikely(memory_stats[i].idx == NR_SLAB_UNRECLAIMABLE_B)) { size += memcg_page_state_output(memcg, NR_SLAB_RECLAIMABLE_B); - seq_buf_printf(&s, "slab %llu\n", size); + seq_buf_printf(s, "slab %llu\n", size); } } /* Accumulated memory events */ - seq_buf_printf(&s, "pgscan %lu\n", + seq_buf_printf(s, "pgscan %lu\n", memcg_events(memcg, PGSCAN_KSWAPD) + memcg_events(memcg, PGSCAN_DIRECT) + memcg_events(memcg, PGSCAN_KHUGEPAGED)); - seq_buf_printf(&s, "pgsteal %lu\n", + seq_buf_printf(s, "pgsteal %lu\n", memcg_events(memcg, PGSTEAL_KSWAPD) + memcg_events(memcg, PGSTEAL_DIRECT) + memcg_events(memcg, PGSTEAL_KHUGEPAGED)); @@ -1627,13 +1624,13 @@ static void memory_stat_format(struct mem_cgroup *memcg, char *buf, int bufsize) memcg_vm_event_stat[i] == PGPGOUT) continue; - seq_buf_printf(&s, "%s %lu\n", + seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg_vm_event_stat[i]), memcg_events(memcg, memcg_vm_event_stat[i])); } /* The above should easily fit into one page */ - WARN_ON_ONCE(seq_buf_has_overflowed(&s)); + WARN_ON_ONCE(seq_buf_has_overflowed(s)); } #define K(x) ((x) << (PAGE_SHIFT-10)) @@ -1671,6 +1668,7 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { /* Use static buffer, for the caller is holding oom_lock. */ static char buf[PAGE_SIZE]; + struct seq_buf s; lockdep_assert_held(&oom_lock); @@ -1693,8 +1691,9 @@ void mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) pr_info("Memory cgroup stats for "); pr_cont_cgroup_path(memcg->css.cgroup); pr_cont(":"); - memory_stat_format(memcg, buf, sizeof(buf)); - pr_info("%s", buf); + seq_buf_init(&s, buf, sizeof(buf)); + memory_stat_format(memcg, &s); + seq_buf_do_printk(&s, KERN_INFO); } /* @@ -6635,10 +6634,12 @@ static int memory_stat_show(struct seq_file *m, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_seq(m); char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + struct seq_buf s; if (!buf) return -ENOMEM; - memory_stat_format(memcg, buf, PAGE_SIZE); + seq_buf_init(&s, buf, PAGE_SIZE); + memory_stat_format(memcg, &s); seq_puts(m, buf); kfree(buf); return 0; -- cgit v1.2.3 From dddb44ffa0d59c8a3f2a5cb9690ccebe3150810c Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 28 Apr 2023 13:24:06 +0000 Subject: memcg: dump memory.stat during cgroup OOM for v1 Patch series "memcg: OOM log improvements", v2. This short patch series brings back some cgroup v1 stats in OOM logs that were unnecessarily changed before. It also makes memcg OOM logs less reliant on printk() internals. This patch (of 2): Commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM") made sure we dump all the stats in memory.stat during a cgroup OOM, but it also introduced a slight behavioral change. The code used to print the non-hierarchical v1 cgroup stats for the entire cgroup subtree, now it only prints the v2 cgroup stats for the cgroup under OOM. For cgroup v1 users, this introduces a few problems: (a) The non-hierarchical stats of the memcg under OOM are no longer shown. (b) A couple of v1-only stats (e.g. pgpgin, pgpgout) are no longer shown. (c) We show the list of cgroup v2 stats, even in cgroup v1. This list of stats is not tracked with v1 in mind. 
While most of the stats seem to be working on v1, there may be some stats that are not fully or correctly tracked. Although OOM log is not set in stone, we should not change it for no reason. When upgrading the kernel version to a version including commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM"), these behavioral changes are noticed in cgroup v1. The fix is simple. Commit c8713d0b2312 ("mm: memcontrol: dump memory.stat during cgroup OOM") separated stats formatting from stats display for v2, to reuse the stats formatting in the OOM logs. Do the same for v1. Move the v2 specific formatting from memory_stat_format() to memcg_stat_format(), add memcg1_stat_format() for v1, and make memory_stat_format() select between them based on cgroup version. Since memory_stat_show() now works for both v1 & v2, drop memcg_stat_show(). Link: https://lkml.kernel.org/r/20230428132406.2540811-1-yosryahmed@google.com Link: https://lkml.kernel.org/r/20230428132406.2540811-3-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Shakeel Butt Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Muchun Song Cc: Petr Mladek Cc: Roman Gushchin Cc: Sergey Senozhatsky Cc: Steven Rostedt (Google) Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/memcontrol.c | 60 +++++++++++++++++++++++++++++++++------------------------ 1 file changed, 35 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a79a47f7fc20..a05c53ab5238 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1580,7 +1580,7 @@ static inline unsigned long memcg_page_state_output(struct mem_cgroup *memcg, return memcg_page_state(memcg, item) * memcg_page_state_unit(item); } -static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) +static void memcg_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { int i; @@ -1633,6 +1633,17 @@ static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) WARN_ON_ONCE(seq_buf_has_overflowed(s)); } +static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s); + +static void memory_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) +{ + if (cgroup_subsys_on_dfl(memory_cgrp_subsys)) + memcg_stat_format(memcg, s); + else + memcg1_stat_format(memcg, s); + WARN_ON_ONCE(seq_buf_has_overflowed(s)); +} + #define K(x) ((x) << (PAGE_SHIFT-10)) /** * mem_cgroup_print_oom_context: Print OOM information relevant to @@ -4135,9 +4146,8 @@ static const unsigned int memcg1_events[] = { PGMAJFAULT, }; -static int memcg_stat_show(struct seq_file *m, void *v) +static void memcg1_stat_format(struct mem_cgroup *memcg, struct seq_buf *s) { - struct mem_cgroup *memcg = mem_cgroup_from_seq(m); unsigned long memory, memsw; struct mem_cgroup *mi; unsigned int i; @@ -4152,18 +4162,18 @@ static int memcg_stat_show(struct seq_file *m, void *v) if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) continue; nr = memcg_page_state_local(memcg, memcg1_stats[i]); - seq_printf(m, "%s %lu\n", memcg1_stat_names[i], + seq_buf_printf(s, "%s %lu\n", memcg1_stat_names[i], nr * memcg_page_state_unit(memcg1_stats[i])); } for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) - seq_printf(m, "%s %lu\n", vm_event_name(memcg1_events[i]), - memcg_events_local(memcg, memcg1_events[i])); + seq_buf_printf(s, "%s %lu\n", vm_event_name(memcg1_events[i]), + memcg_events_local(memcg, memcg1_events[i])); for (i = 0; i < NR_LRU_LISTS; i++) - seq_printf(m, "%s %lu\n", lru_list_name(i), - memcg_page_state_local(memcg, NR_LRU_BASE + i) * - 
PAGE_SIZE); + seq_buf_printf(s, "%s %lu\n", lru_list_name(i), + memcg_page_state_local(memcg, NR_LRU_BASE + i) * + PAGE_SIZE); /* Hierarchical information */ memory = memsw = PAGE_COUNTER_MAX; @@ -4171,11 +4181,11 @@ static int memcg_stat_show(struct seq_file *m, void *v) memory = min(memory, READ_ONCE(mi->memory.max)); memsw = min(memsw, READ_ONCE(mi->memsw.max)); } - seq_printf(m, "hierarchical_memory_limit %llu\n", - (u64)memory * PAGE_SIZE); + seq_buf_printf(s, "hierarchical_memory_limit %llu\n", + (u64)memory * PAGE_SIZE); if (do_memsw_account()) - seq_printf(m, "hierarchical_memsw_limit %llu\n", - (u64)memsw * PAGE_SIZE); + seq_buf_printf(s, "hierarchical_memsw_limit %llu\n", + (u64)memsw * PAGE_SIZE); for (i = 0; i < ARRAY_SIZE(memcg1_stats); i++) { unsigned long nr; @@ -4183,19 +4193,19 @@ static int memcg_stat_show(struct seq_file *m, void *v) if (memcg1_stats[i] == MEMCG_SWAP && !do_memsw_account()) continue; nr = memcg_page_state(memcg, memcg1_stats[i]); - seq_printf(m, "total_%s %llu\n", memcg1_stat_names[i], + seq_buf_printf(s, "total_%s %llu\n", memcg1_stat_names[i], (u64)nr * memcg_page_state_unit(memcg1_stats[i])); } for (i = 0; i < ARRAY_SIZE(memcg1_events); i++) - seq_printf(m, "total_%s %llu\n", - vm_event_name(memcg1_events[i]), - (u64)memcg_events(memcg, memcg1_events[i])); + seq_buf_printf(s, "total_%s %llu\n", + vm_event_name(memcg1_events[i]), + (u64)memcg_events(memcg, memcg1_events[i])); for (i = 0; i < NR_LRU_LISTS; i++) - seq_printf(m, "total_%s %llu\n", lru_list_name(i), - (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * - PAGE_SIZE); + seq_buf_printf(s, "total_%s %llu\n", lru_list_name(i), + (u64)memcg_page_state(memcg, NR_LRU_BASE + i) * + PAGE_SIZE); #ifdef CONFIG_DEBUG_VM { @@ -4210,12 +4220,10 @@ static int memcg_stat_show(struct seq_file *m, void *v) anon_cost += mz->lruvec.anon_cost; file_cost += mz->lruvec.file_cost; } - seq_printf(m, "anon_cost %lu\n", anon_cost); - seq_printf(m, "file_cost %lu\n", file_cost); + seq_buf_printf(s, "anon_cost %lu\n", anon_cost); + seq_buf_printf(s, "file_cost %lu\n", file_cost); } #endif - - return 0; } static u64 mem_cgroup_swappiness_read(struct cgroup_subsys_state *css, @@ -5059,6 +5067,8 @@ static int mem_cgroup_slab_show(struct seq_file *m, void *p) } #endif +static int memory_stat_show(struct seq_file *m, void *v); + static struct cftype mem_cgroup_legacy_files[] = { { .name = "usage_in_bytes", @@ -5091,7 +5101,7 @@ static struct cftype mem_cgroup_legacy_files[] = { }, { .name = "stat", - .seq_show = memcg_stat_show, + .seq_show = memory_stat_show, }, { .name = "force_empty", -- cgit v1.2.3 From 8b9167cd9ef039d95b65ef9600a7507795173121 Mon Sep 17 00:00:00 2001 From: Wen Yang Date: Tue, 25 Apr 2023 23:52:35 +0800 Subject: mm: compaction: optimize compact_memory to comply with the admin-guide For the /proc/sys/vm/compact_memory file, the admin-guide states: When 1 is written to the file, all zones are compacted such that free memory is available in contiguous blocks where possible. This can be important for example in the allocation of huge pages although processes will also directly compact memory as required But it was not strictly followed, writing any value would cause all zones to be compacted. It has been slightly optimized to comply with the admin-guide. Enforce the 1 on the unlikely chance that the sysctl handler is ever extended to do something different. 
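Concretely, with the handler change below, writing 1 to /proc/sys/vm/compact_memory still compacts all nodes, while writing any other value (for example 0) is expected to be rejected with EINVAL.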
Commit ef4984384172 ("mm/compaction: remove unused variable sysctl_compact_memory") has also been optimized a bit here, as the declaration in the external header file has been eliminated, and sysctl_compact_memory also needs to be verified. [akpm@linux-foundation.org: add __read_mostly, per Mel] Link: https://lkml.kernel.org/r/tencent_DFF54DB2A60F3333F97D3F6B5441519B050A@qq.com Signed-off-by: Wen Yang Acked-by: Mel Gorman Cc: Oscar Salvador Cc: William Lam Cc: Pintu Kumar Cc: Fu Wei Signed-off-by: Andrew Morton --- mm/compaction.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index c8bcdea15f5f..5584fa5fa3d4 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1736,6 +1736,7 @@ static int sysctl_compact_unevictable_allowed __read_mostly = CONFIG_COMPACT_UNE */ static unsigned int __read_mostly sysctl_compaction_proactiveness = 20; static int sysctl_extfrag_threshold = 500; +static int __read_mostly sysctl_compact_memory; static inline void update_fast_start_pfn(struct compact_control *cc, unsigned long pfn) @@ -2780,6 +2781,15 @@ static int compaction_proactiveness_sysctl_handler(struct ctl_table *table, int static int sysctl_compaction_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { + int ret; + + ret = proc_dointvec(table, write, buffer, length, ppos); + if (ret) + return ret; + + if (sysctl_compact_memory != 1) + return -EINVAL; + if (write) compact_nodes(); @@ -3095,7 +3105,7 @@ static int proc_dointvec_minmax_warn_RT_change(struct ctl_table *table, static struct ctl_table vm_compaction[] = { { .procname = "compact_memory", - .data = NULL, + .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, -- cgit v1.2.3 From 3c4322c94b9af33dc62e47cb80c057f9814fb595 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Mon, 24 Apr 2023 21:45:39 +0800 Subject: mm/page_alloc: drop the unnecessary pfn_valid() for start pfn __pageblock_pfn_to_page() currently performs both pfn_valid check and pfn_to_online_page(). The former one is redundant because the latter is a stronger check. Drop pfn_valid(). Link: https://lkml.kernel.org/r/c3868b58c6714c09a43440d7d02c7b4eed6e03f6.1682342634.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Reviewed-by: David Hildenbrand Reviewed-by: "Huang, Ying" Acked-by: Michal Hocko Cc: Mel Gorman Cc: Mike Rapoport (IBM) Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 47421bedc12b..af9c995d3c1e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1521,7 +1521,7 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, /* end_pfn is one past the range we are checking */ end_pfn--; - if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn)) + if (!pfn_valid(end_pfn)) return NULL; start_page = pfn_to_online_page(start_pfn); -- cgit v1.2.3 From 190409caaf7e3eee6926943488e486590efe6fde Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:17 +0000 Subject: memcg: flush stats non-atomically in mem_cgroup_wb_stats() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous patch moved the wb_over_bg_thresh()->mem_cgroup_wb_stats() code path in wb_writeback() outside the lock section. We no longer need to flush the stats atomically. Flush the stats non-atomically. 
Link: https://lkml.kernel.org/r/20230421174020.2994750-3-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Michal Koutný Acked-by: Shakeel Butt Acked-by: Tejun Heo Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Signed-off-by: Andrew Morton --- mm/memcontrol.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a05c53ab5238..929162c5f45f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4656,11 +4656,7 @@ void mem_cgroup_wb_stats(struct bdi_writeback *wb, unsigned long *pfilepages, struct mem_cgroup *memcg = mem_cgroup_from_css(wb->memcg_css); struct mem_cgroup *parent; - /* - * wb_writeback() takes a spinlock and calls - * wb_over_bg_thresh()->mem_cgroup_wb_stats(). Do not sleep. - */ - mem_cgroup_flush_stats_atomic(); + mem_cgroup_flush_stats(); *pdirty = memcg_page_state(memcg, NR_FILE_DIRTY); *pwriteback = memcg_page_state(memcg, NR_WRITEBACK); -- cgit v1.2.3 From f82a7a86dbfbd0ee81a4907ca41ba78bda1846f4 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:18 +0000 Subject: memcg: calculate root usage from global state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently, we approximate the root usage by adding the memcg stats for anon, file, and conditionally swap (for memsw). To read the memcg stats we need to invoke an rstat flush. rstat flushes can be expensive, they scale with the number of cpus and cgroups on the system. mem_cgroup_usage() is called by memcg_events()->mem_cgroup_threshold() with irqs disabled, so such an expensive operation with irqs disabled can cause problems. Instead, approximate the root usage from global state. This is not 100% accurate, but the root usage has always been ill-defined anyway. Link: https://lkml.kernel.org/r/20230421174020.2994750-4-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reviewed-by: Michal Koutný Acked-by: Shakeel Butt Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Tejun Heo Signed-off-by: Andrew Morton --- mm/memcontrol.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 929162c5f45f..7474aa8e4026 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3710,27 +3710,13 @@ static unsigned long mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) if (mem_cgroup_is_root(memcg)) { /* - * We can reach here from irq context through: - * uncharge_batch() - * |--memcg_check_events() - * |--mem_cgroup_threshold() - * |--__mem_cgroup_threshold() - * |--mem_cgroup_usage - * - * rstat flushing is an expensive operation that should not be - * done from irq context; use stale stats in this case. - * Arguably, usage threshold events are not reliable on the root - * memcg anyway since its usage is ill-defined. - * - * Additionally, other call paths through memcg_check_events() - * disable irqs, so make sure we are flushing stats atomically. + * Approximate root's usage from global state. This isn't + * perfect, but the root usage was always an approximation. 
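Concretely, as the diff below shows, the root usage is now derived from global_node_page_state(NR_FILE_PAGES) plus global_node_page_state(NR_ANON_MAPPED), with total_swap_pages - get_nr_swap_pages() added when swap is accounted, instead of reading the flushed root memcg counters.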
*/ - if (in_task()) - mem_cgroup_flush_stats_atomic(); - val = memcg_page_state(memcg, NR_FILE_PAGES) + - memcg_page_state(memcg, NR_ANON_MAPPED); + val = global_node_page_state(NR_FILE_PAGES) + + global_node_page_state(NR_ANON_MAPPED); if (swap) - val += memcg_page_state(memcg, MEMCG_SWAP); + val += total_swap_pages - get_nr_swap_pages(); } else { if (!swap) val = page_counter_read(&memcg->memory); -- cgit v1.2.3 From 35822fdae3bf509532b0954088070f17de51ff15 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Fri, 21 Apr 2023 17:40:19 +0000 Subject: memcg: remove mem_cgroup_flush_stats_atomic() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previous patches removed all callers of mem_cgroup_flush_stats_atomic(). Remove the function and simplify the code. Link: https://lkml.kernel.org/r/20230421174020.2994750-5-yosryahmed@google.com Signed-off-by: Yosry Ahmed Acked-by: Shakeel Butt Cc: Alexander Viro Cc: Christian Brauner Cc: Jan Kara Cc: Jens Axboe Cc: Johannes Weiner Cc: Michal Hocko Cc: Michal Koutný Cc: Muchun Song Cc: Roman Gushchin Cc: Tejun Heo Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 5 ----- mm/memcontrol.c | 24 +++++------------------- 2 files changed, 5 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 222d7370134c..00a88cf947e1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -1038,7 +1038,6 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec, } void mem_cgroup_flush_stats(void); -void mem_cgroup_flush_stats_atomic(void); void mem_cgroup_flush_stats_ratelimited(void); void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx, @@ -1537,10 +1536,6 @@ static inline void mem_cgroup_flush_stats(void) { } -static inline void mem_cgroup_flush_stats_atomic(void) -{ -} - static inline void mem_cgroup_flush_stats_ratelimited(void) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7474aa8e4026..2184a9c566f1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -639,7 +639,7 @@ static inline void memcg_rstat_updated(struct mem_cgroup *memcg, int val) } } -static void do_flush_stats(bool atomic) +static void do_flush_stats(void) { /* * We always flush the entire tree, so concurrent flushers can just @@ -652,30 +652,16 @@ static void do_flush_stats(bool atomic) WRITE_ONCE(flush_next_time, jiffies_64 + 2*FLUSH_TIME); - if (atomic) - cgroup_rstat_flush_atomic(root_mem_cgroup->css.cgroup); - else - cgroup_rstat_flush(root_mem_cgroup->css.cgroup); + cgroup_rstat_flush(root_mem_cgroup->css.cgroup); atomic_set(&stats_flush_threshold, 0); atomic_set(&stats_flush_ongoing, 0); } -static bool should_flush_stats(void) -{ - return atomic_read(&stats_flush_threshold) > num_online_cpus(); -} - void mem_cgroup_flush_stats(void) { - if (should_flush_stats()) - do_flush_stats(false); -} - -void mem_cgroup_flush_stats_atomic(void) -{ - if (should_flush_stats()) - do_flush_stats(true); + if (atomic_read(&stats_flush_threshold) > num_online_cpus()) + do_flush_stats(); } void mem_cgroup_flush_stats_ratelimited(void) @@ -690,7 +676,7 @@ static void flush_memcg_stats_dwork(struct work_struct *w) * Always flush here so that flushing in latency-sensitive paths is * as cheap as possible. 
*/ - do_flush_stats(false); + do_flush_stats(); queue_delayed_work(system_unbound_wq, &stats_flush_dwork, FLUSH_TIME); } -- cgit v1.2.3 From 857f21397f7113e4f764aa65fb0160eb7f404808 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 19 Apr 2023 03:07:38 +0000 Subject: memcg, oom: remove unnecessary check in mem_cgroup_oom_synchronize() mem_cgroup_oom_synchronize() is only used when the memcg oom handling is handed over to the edge of the #PF path. Since commit 29ef680ae7c2 ("memcg, oom: move out_of_memory back to the charge path") this is the case only when the kernel memcg oom killer is disabled (current->memcg_in_oom is only set if memcg->oom_kill_disable). Therefore a check for oom_kill_disable in mem_cgroup_oom_synchronize() is not required. Link: https://lkml.kernel.org/r/20230419030739.115845-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2184a9c566f1..e8aead97454b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2024,16 +2024,9 @@ bool mem_cgroup_oom_synchronize(bool handle) if (locked) mem_cgroup_oom_notify(memcg); - if (locked && !READ_ONCE(memcg->oom_kill_disable)) { - mem_cgroup_unmark_under_oom(memcg); - finish_wait(&memcg_oom_waitq, &owait.wait); - mem_cgroup_out_of_memory(memcg, current->memcg_oom_gfp_mask, - current->memcg_oom_order); - } else { - schedule(); - mem_cgroup_unmark_under_oom(memcg); - finish_wait(&memcg_oom_waitq, &owait.wait); - } + schedule(); + mem_cgroup_unmark_under_oom(memcg); + finish_wait(&memcg_oom_waitq, &owait.wait); if (locked) { mem_cgroup_oom_unlock(memcg); -- cgit v1.2.3 From 18b1d18bc2bd8f54e8df6bc8096185361a6d1b15 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 19 Apr 2023 03:07:39 +0000 Subject: memcg, oom: remove explicit wakeup in mem_cgroup_oom_synchronize() Before commit 29ef680ae7c2 ("memcg, oom: move out_of_memory back to the charge path"), all memcg oom killers were delayed to page fault path. And the explicit wakeup is used in this case: thread A: ... if (locked) { // complete oom-kill, hold the lock mem_cgroup_oom_unlock(memcg); ... } ... thread B: ... if (locked && !memcg->oom_kill_disable) { ... } else { schedule(); // can't acquire the lock ... } ... The reason is that thread A kicks off the OOM-killer, which leads to wakeups from the uncharges of the exiting task. But thread B is not guaranteed to see them if it enters the OOM path after the OOM kills but before thread A releases the lock. Now only oom_kill_disable case is handled from the #PF path. In that case it is userspace to trigger the wake up not the #PF path itself. All potential paths to free some charges are responsible to call memcg_oom_recover() , so the explicit wakeup is not needed in the mem_cgroup_oom_synchronize() path which doesn't release any memory itself. 
Link: https://lkml.kernel.org/r/20230419030739.115845-2-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Michal Hocko Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e8aead97454b..d31fb1e2cb33 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2028,15 +2028,8 @@ bool mem_cgroup_oom_synchronize(bool handle) mem_cgroup_unmark_under_oom(memcg); finish_wait(&memcg_oom_waitq, &owait.wait); - if (locked) { + if (locked) mem_cgroup_oom_unlock(memcg); - /* - * There is no guarantee that an OOM-lock contender - * sees the wakeups triggered by the OOM kill - * uncharges. Wake any sleepers explicitly. - */ - memcg_oom_recover(memcg); - } cleanup: current->memcg_in_oom = NULL; css_put(&memcg->css); -- cgit v1.2.3 From ffcb5f5262b756a598eefb11e340bbd027cde037 Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 2 May 2023 18:36:06 -0700 Subject: workingset: refactor LRU refault to expose refault recency check Patch series "cachestat: a new syscall for page cache state of files", v13. There is currently no good way to query the page cache statistics of large files and directory trees. There is mincore(), but it scales poorly: the kernel writes out a lot of bitmap data that userspace has to aggregate, when the user really does not care about per-page information in that case. The user also needs to mmap and unmap each file as it goes along, which can be quite slow as well. Some use cases where this information could come in handy: * Allowing database to decide whether to perform an index scan or direct table queries based on the in-memory cache state of the index. * Visibility into the writeback algorithm, for performance issues diagnostic. * Workload-aware writeback pacing: estimating IO fulfilled by page cache (and IO to be done) within a range of a file, allowing for more frequent syncing when and where there is IO capacity, and batching when there is not. * Computing memory usage of large files/directory trees, analogous to the du tool for disk usage. More information about these use cases could be found in this thread: https://lore.kernel.org/lkml/20230315170934.GA97793@cmpxchg.org/ This series of patches introduces a new system call, cachestat, that summarizes the page cache statistics (number of cached pages, dirty pages, pages marked for writeback, evicted pages etc.) of a file, in a specified range of bytes. It also include a selftest suite that tests some typical usage. Currently, the syscall is only wired in for x86 architecture. This interface is inspired by past discussion and concerns with fincore, which has a similar design (and as a result, issues) as mincore. Relevant links: https://lkml.indiana.edu/hypermail/linux/kernel/1302.1/04207.html https://lkml.indiana.edu/hypermail/linux/kernel/1302.1/04209.html I have also developed a small tool that computes the memory usage of files and directories, analogous to the du utility. User can choose between mincore or cachestat (with cachestat exporting more information than mincore). 
To compare the performance of these two options, I benchmarked the tool on the root directory of a Meta's server machine, each for five runs: Using cachestat real -- Median: 33.377s, Average: 33.475s, Standard Deviation: 0.3602 user -- Median: 4.08s, Average: 4.1078s, Standard Deviation: 0.0742 sys -- Median: 28.823s, Average: 28.8866s, Standard Deviation: 0.2689 Using mincore: real -- Median: 102.352s, Average: 102.3442s, Standard Deviation: 0.2059 user -- Median: 10.149s, Average: 10.1482s, Standard Deviation: 0.0162 sys -- Median: 91.186s, Average: 91.2084s, Standard Deviation: 0.2046 I also ran both syscalls on a 2TB sparse file: Using cachestat: real 0m0.009s user 0m0.000s sys 0m0.009s Using mincore: real 0m37.510s user 0m2.934s sys 0m34.558s Very large files like this are the pathological case for mincore. In fact, to compute the stats for a single 2TB file, mincore takes as long as cachestat takes to compute the stats for the entire tree! This could easily happen inadvertently when we run it on subdirectories. Mincore is clearly not suitable for a general-purpose command line tool. Regarding security concerns, cachestat() should not pose any additional issues. The caller already has read permission to the file itself (since they need an fd to that file to call cachestat). This means that the caller can access the underlying data in its entirety, which is a much greater source of information (and as a result, a much greater security risk) than the cache status itself. The latest API change (in v13 of the patch series) is suggested by Jens Axboe. It allows for 64-bit length argument, even on 32-bit architecture (which is previously not possible due to the limit on the number of syscall arguments). Furthermore, it eliminates the need for compatibility handling - every user can use the same ABI. This patch (of 4): In preparation for computing recently evicted pages in cachestat, refactor workingset_refault and lru_gen_refault to expose a helper function that would test if an evicted page is recently evicted. 
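A condensed outline of the resulting split, using only names that appear in the diff below (a rough sketch of the new call pattern, not a compilable excerpt):

	/* workingset_refault(), abridged: accounting and error paths omitted */
	void workingset_refault(struct folio *folio, void *shadow)
	{
		bool file = folio_is_file_lru(folio);
		bool workingset;

		if (lru_gen_enabled()) {
			lru_gen_refault(folio, shadow);
			return;
		}

		/* count the refault, then let the new helper decide on recency */
		if (!workingset_test_recent(shadow, file, &workingset))
			return;

		folio_set_active(folio);	/* recently evicted: activate on refault */
	}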
[penguin-kernel@I-love.SAKURA.ne.jp: add missing rcu_read_unlock() in lru_gen_refault()] Link: https://lkml.kernel.org/r/610781bc-cf11-fc89-a46f-87cb8235d439@I-love.SAKURA.ne.jp Link: https://lkml.kernel.org/r/20230503013608.2431726-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20230503013608.2431726-2-nphamcs@gmail.com Signed-off-by: Nhat Pham Signed-off-by: Tetsuo Handa Acked-by: Johannes Weiner Cc: Brian Foster Cc: Johannes Weiner Cc: Matthew Wilcox (Oracle) Cc: Michael Kerrisk Cc: Tetsuo Handa Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 + mm/workingset.c | 150 ++++++++++++++++++++++++++++++++++----------------- 2 files changed, 103 insertions(+), 48 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index 3c69cb653cb9..b2128df5edea 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -368,6 +368,7 @@ static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry) } /* linux/mm/workingset.c */ +bool workingset_test_recent(void *shadow, bool file, bool *workingset); void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages); void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg); void workingset_refault(struct folio *folio, void *shadow); diff --git a/mm/workingset.c b/mm/workingset.c index 817758951886..90ae785d4c9c 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -255,6 +255,29 @@ static void *lru_gen_eviction(struct folio *folio) return pack_shadow(mem_cgroup_id(memcg), pgdat, token, refs); } +/* + * Tests if the shadow entry is for a folio that was recently evicted. + * Fills in @memcgid, @pglist_data, @token, @workingset with the values + * unpacked from shadow. + */ +static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, + struct pglist_data **pgdat, unsigned long *token, bool *workingset) +{ + struct mem_cgroup *eviction_memcg; + struct lruvec *lruvec; + struct lru_gen_folio *lrugen; + unsigned long min_seq; + + unpack_shadow(shadow, memcgid, pgdat, token, workingset); + eviction_memcg = mem_cgroup_from_id(*memcgid); + + lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat); + lrugen = &lruvec->lrugen; + + min_seq = READ_ONCE(lrugen->min_seq[file]); + return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); +} + static void lru_gen_refault(struct folio *folio, void *shadow) { int hist, tier, refs; @@ -269,23 +292,22 @@ static void lru_gen_refault(struct folio *folio, void *shadow) int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); - unpack_shadow(shadow, &memcg_id, &pgdat, &token, &workingset); - - if (pgdat != folio_pgdat(folio)) - return; - rcu_read_lock(); + if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token, + &workingset)) + goto unlock; + memcg = folio_memcg_rcu(folio); if (memcg_id != mem_cgroup_id(memcg)) goto unlock; + if (pgdat != folio_pgdat(folio)) + goto unlock; + lruvec = mem_cgroup_lruvec(memcg, pgdat); lrugen = &lruvec->lrugen; - min_seq = READ_ONCE(lrugen->min_seq[type]); - if ((token >> LRU_REFS_WIDTH) != (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH))) - goto unlock; hist = lru_hist_from_seq(min_seq); /* see the comment in folio_lru_refs() */ @@ -317,6 +339,12 @@ static void *lru_gen_eviction(struct folio *folio) return NULL; } +static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, + struct pglist_data **pgdat, unsigned long *token, bool *workingset) +{ + return false; +} + static void lru_gen_refault(struct folio *folio, void *shadow) { } @@ 
-385,42 +413,34 @@ void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg) } /** - * workingset_refault - Evaluate the refault of a previously evicted folio. - * @folio: The freshly allocated replacement folio. - * @shadow: Shadow entry of the evicted folio. - * - * Calculates and evaluates the refault distance of the previously - * evicted folio in the context of the node and the memcg whose memory - * pressure caused the eviction. + * workingset_test_recent - tests if the shadow entry is for a folio that was + * recently evicted. Also fills in @workingset with the value unpacked from + * shadow. + * @shadow: the shadow entry to be tested. + * @file: whether the corresponding folio is from the file lru. + * @workingset: where the workingset value unpacked from shadow should + * be stored. + * + * Return: true if the shadow is for a recently evicted folio; false otherwise. */ -void workingset_refault(struct folio *folio, void *shadow) +bool workingset_test_recent(void *shadow, bool file, bool *workingset) { - bool file = folio_is_file_lru(folio); struct mem_cgroup *eviction_memcg; struct lruvec *eviction_lruvec; unsigned long refault_distance; unsigned long workingset_size; - struct pglist_data *pgdat; - struct mem_cgroup *memcg; - unsigned long eviction; - struct lruvec *lruvec; unsigned long refault; - bool workingset; int memcgid; - long nr; + struct pglist_data *pgdat; + unsigned long eviction; - if (lru_gen_enabled()) { - lru_gen_refault(folio, shadow); - return; - } + if (lru_gen_enabled()) + return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction, + workingset); - unpack_shadow(shadow, &memcgid, &pgdat, &eviction, &workingset); + unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; - /* Flush stats (and potentially sleep) before holding RCU read lock */ - mem_cgroup_flush_stats_ratelimited(); - - rcu_read_lock(); /* * Look up the memcg associated with the stored ID. It might * have been deleted since the folio's eviction. @@ -439,7 +459,8 @@ void workingset_refault(struct folio *folio, void *shadow) */ eviction_memcg = mem_cgroup_from_id(memcgid); if (!mem_cgroup_disabled() && !eviction_memcg) - goto out; + return false; + eviction_lruvec = mem_cgroup_lruvec(eviction_memcg, pgdat); refault = atomic_long_read(&eviction_lruvec->nonresident_age); @@ -461,20 +482,6 @@ void workingset_refault(struct folio *folio, void *shadow) */ refault_distance = (refault - eviction) & EVICTION_MASK; - /* - * The activation decision for this folio is made at the level - * where the eviction occurred, as that is where the LRU order - * during folio reclaim is being determined. - * - * However, the cgroup that will own the folio is the one that - * is actually experiencing the refault event. - */ - nr = folio_nr_pages(folio); - memcg = folio_memcg(folio); - pgdat = folio_pgdat(folio); - lruvec = mem_cgroup_lruvec(memcg, pgdat); - - mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); /* * Compare the distance to the existing workingset size. We * don't activate pages that couldn't stay resident even if @@ -495,7 +502,54 @@ void workingset_refault(struct folio *folio, void *shadow) NR_INACTIVE_ANON); } } - if (refault_distance > workingset_size) + + return refault_distance <= workingset_size; +} + +/** + * workingset_refault - Evaluate the refault of a previously evicted folio. + * @folio: The freshly allocated replacement folio. + * @shadow: Shadow entry of the evicted folio. 
+ * + * Calculates and evaluates the refault distance of the previously + * evicted folio in the context of the node and the memcg whose memory + * pressure caused the eviction. + */ +void workingset_refault(struct folio *folio, void *shadow) +{ + bool file = folio_is_file_lru(folio); + struct pglist_data *pgdat; + struct mem_cgroup *memcg; + struct lruvec *lruvec; + bool workingset; + long nr; + + if (lru_gen_enabled()) { + lru_gen_refault(folio, shadow); + return; + } + + /* Flush stats (and potentially sleep) before holding RCU read lock */ + mem_cgroup_flush_stats_ratelimited(); + + rcu_read_lock(); + + /* + * The activation decision for this folio is made at the level + * where the eviction occurred, as that is where the LRU order + * during folio reclaim is being determined. + * + * However, the cgroup that will own the folio is the one that + * is actually experiencing the refault event. + */ + nr = folio_nr_pages(folio); + memcg = folio_memcg(folio); + pgdat = folio_pgdat(folio); + lruvec = mem_cgroup_lruvec(memcg, pgdat); + + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file, nr); + + if (!workingset_test_recent(shadow, file, &workingset)) goto out; folio_set_active(folio); -- cgit v1.2.3 From cf264e1329fb0307e044f7675849f9f38b44c11a Mon Sep 17 00:00:00 2001 From: Nhat Pham Date: Tue, 2 May 2023 18:36:07 -0700 Subject: cachestat: implement cachestat syscall There is currently no good way to query the page cache state of large file sets and directory trees. There is mincore(), but it scales poorly: the kernel writes out a lot of bitmap data that userspace has to aggregate, when the user really does not care about per-page information in that case. The user also needs to mmap and unmap each file as it goes along, which can be quite slow as well. Some use cases where this information could come in handy: * Allowing database to decide whether to perform an index scan or direct table queries based on the in-memory cache state of the index. * Visibility into the writeback algorithm, for performance issues diagnostic. * Workload-aware writeback pacing: estimating IO fulfilled by page cache (and IO to be done) within a range of a file, allowing for more frequent syncing when and where there is IO capacity, and batching when there is not. * Computing memory usage of large files/directory trees, analogous to the du tool for disk usage. More information about these use cases could be found in the following thread: https://lore.kernel.org/lkml/20230315170934.GA97793@cmpxchg.org/ This patch implements a new syscall that queries cache state of a file and summarizes the number of cached pages, number of dirty pages, number of pages marked for writeback, number of (recently) evicted pages, etc. in a given range. Currently, the syscall is only wired in for x86 architecture. NAME cachestat - query the page cache statistics of a file. SYNOPSIS #include struct cachestat_range { __u64 off; __u64 len; }; struct cachestat { __u64 nr_cache; __u64 nr_dirty; __u64 nr_writeback; __u64 nr_evicted; __u64 nr_recently_evicted; }; int cachestat(unsigned int fd, struct cachestat_range *cstat_range, struct cachestat *cstat, unsigned int flags); DESCRIPTION cachestat() queries the number of cached pages, number of dirty pages, number of pages marked for writeback, number of evicted pages, number of recently evicted pages, in the bytes range given by `off` and `len`. An evicted page is a page that is previously in the page cache but has been evicted since.
A page is recently evicted if its last eviction was recent enough that its reentry to the cache would indicate that it is actively being used by the system, and that there is memory pressure on the system. These values are returned in a cachestat struct, whose address is given by the `cstat` argument. The `off` and `len` arguments must be non-negative integers. If `len` > 0, the queried range is [`off`, `off` + `len`]. If `len` == 0, we will query in the range from `off` to the end of the file. The `flags` argument is unused for now, but is included for future extensibility. User should pass 0 (i.e no flag specified). Currently, hugetlbfs is not supported. Because the status of a page can change after cachestat() checks it but before it returns to the application, the returned values may contain stale information. RETURN VALUE On success, cachestat returns 0. On error, -1 is returned, and errno is set to indicate the error. ERRORS EFAULT cstat or cstat_args points to an invalid address. EINVAL invalid flags. EBADF invalid file descriptor. EOPNOTSUPP file descriptor is of a hugetlbfs file [nphamcs@gmail.com: replace rounddown logic with the existing helper] Link: https://lkml.kernel.org/r/20230504022044.3675469-1-nphamcs@gmail.com Link: https://lkml.kernel.org/r/20230503013608.2431726-3-nphamcs@gmail.com Signed-off-by: Nhat Pham Acked-by: Johannes Weiner Cc: Brian Foster Cc: Matthew Wilcox (Oracle) Cc: Michael Kerrisk Signed-off-by: Andrew Morton --- arch/x86/entry/syscalls/syscall_32.tbl | 1 + arch/x86/entry/syscalls/syscall_64.tbl | 1 + include/linux/syscalls.h | 5 + include/uapi/asm-generic/unistd.h | 5 +- include/uapi/linux/mman.h | 14 +++ init/Kconfig | 10 ++ kernel/sys_ni.c | 1 + mm/filemap.c | 171 +++++++++++++++++++++++++++++++++ 8 files changed, 207 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl index 320480a8db4f..bc0a3c941b35 100644 --- a/arch/x86/entry/syscalls/syscall_32.tbl +++ b/arch/x86/entry/syscalls/syscall_32.tbl @@ -455,3 +455,4 @@ 448 i386 process_mrelease sys_process_mrelease 449 i386 futex_waitv sys_futex_waitv 450 i386 set_mempolicy_home_node sys_set_mempolicy_home_node +451 i386 cachestat sys_cachestat diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl index c84d12608cd2..227538b0ce80 100644 --- a/arch/x86/entry/syscalls/syscall_64.tbl +++ b/arch/x86/entry/syscalls/syscall_64.tbl @@ -372,6 +372,7 @@ 448 common process_mrelease sys_process_mrelease 449 common futex_waitv sys_futex_waitv 450 common set_mempolicy_home_node sys_set_mempolicy_home_node +451 common cachestat sys_cachestat # # Due to a historical design error, certain syscalls are numbered differently diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 33a0ee3bcb2e..6648c07c4381 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -72,6 +72,8 @@ struct open_how; struct mount_attr; struct landlock_ruleset_attr; enum landlock_rule_type; +struct cachestat_range; +struct cachestat; #include #include @@ -1058,6 +1060,9 @@ asmlinkage long sys_memfd_secret(unsigned int flags); asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long len, unsigned long home_node, unsigned long flags); +asmlinkage long sys_cachestat(unsigned int fd, + struct cachestat_range __user *cstat_range, + struct cachestat __user *cstat, unsigned int flags); /* * Architecture-specific system calls diff --git a/include/uapi/asm-generic/unistd.h 
b/include/uapi/asm-generic/unistd.h index 45fa180cc56a..cd639fae9086 100644 --- a/include/uapi/asm-generic/unistd.h +++ b/include/uapi/asm-generic/unistd.h @@ -886,8 +886,11 @@ __SYSCALL(__NR_futex_waitv, sys_futex_waitv) #define __NR_set_mempolicy_home_node 450 __SYSCALL(__NR_set_mempolicy_home_node, sys_set_mempolicy_home_node) +#define __NR_cachestat 451 +__SYSCALL(__NR_cachestat, sys_cachestat) + #undef __NR_syscalls -#define __NR_syscalls 451 +#define __NR_syscalls 452 /* * 32 bit systems traditionally used different diff --git a/include/uapi/linux/mman.h b/include/uapi/linux/mman.h index f55bc680b5b0..a246e11988d5 100644 --- a/include/uapi/linux/mman.h +++ b/include/uapi/linux/mman.h @@ -4,6 +4,7 @@ #include #include +#include #define MREMAP_MAYMOVE 1 #define MREMAP_FIXED 2 @@ -41,4 +42,17 @@ #define MAP_HUGE_2GB HUGETLB_FLAG_ENCODE_2GB #define MAP_HUGE_16GB HUGETLB_FLAG_ENCODE_16GB +struct cachestat_range { + __u64 off; + __u64 len; +}; + +struct cachestat { + __u64 nr_cache; + __u64 nr_dirty; + __u64 nr_writeback; + __u64 nr_evicted; + __u64 nr_recently_evicted; +}; + #endif /* _UAPI_LINUX_MMAN_H */ diff --git a/init/Kconfig b/init/Kconfig index 32c24950c4ce..f7f65af4ee12 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1771,6 +1771,16 @@ config RSEQ If unsure, say Y. +config CACHESTAT_SYSCALL + bool "Enable cachestat() system call" if EXPERT + default y + help + Enable the cachestat system call, which queries the page cache + statistics of a file (number of cached pages, dirty pages, + pages marked for writeback, (recently) evicted pages). + + If unsure say Y here. + config DEBUG_RSEQ default n bool "Enabled debugging of rseq() system call" if EXPERT diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 860b2dcf3ac4..04bfb1e4d377 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -299,6 +299,7 @@ COND_SYSCALL(set_mempolicy); COND_SYSCALL(migrate_pages); COND_SYSCALL(move_pages); COND_SYSCALL(set_mempolicy_home_node); +COND_SYSCALL(cachestat); COND_SYSCALL(perf_event_open); COND_SYSCALL(accept4); diff --git a/mm/filemap.c b/mm/filemap.c index b4c9bd368b7e..2d3d70c64dfd 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include #include @@ -58,6 +59,8 @@ #include +#include "swap.h" + /* * Shared mappings implemented 30.11.1994. It's not fully working yet, * though. @@ -4119,3 +4122,171 @@ bool filemap_release_folio(struct folio *folio, gfp_t gfp) return try_to_free_buffers(folio); } EXPORT_SYMBOL(filemap_release_folio); + +#ifdef CONFIG_CACHESTAT_SYSCALL +/** + * filemap_cachestat() - compute the page cache statistics of a mapping + * @mapping: The mapping to compute the statistics for. + * @first_index: The starting page cache index. + * @last_index: The final page index (inclusive). + * @cs: the cachestat struct to write the result to. + * + * This will query the page cache statistics of a mapping in the + * page range of [first_index, last_index] (inclusive). The statistics + * queried include: number of dirty pages, number of pages marked for + * writeback, and the number of (recently) evicted pages. 
+ */ +static void filemap_cachestat(struct address_space *mapping, + pgoff_t first_index, pgoff_t last_index, struct cachestat *cs) +{ + XA_STATE(xas, &mapping->i_pages, first_index); + struct folio *folio; + + rcu_read_lock(); + xas_for_each(&xas, folio, last_index) { + unsigned long nr_pages; + pgoff_t folio_first_index, folio_last_index; + + if (xas_retry(&xas, folio)) + continue; + + if (xa_is_value(folio)) { + /* page is evicted */ + void *shadow = (void *)folio; + bool workingset; /* not used */ + int order = xa_get_order(xas.xa, xas.xa_index); + + nr_pages = 1 << order; + folio_first_index = round_down(xas.xa_index, 1 << order); + folio_last_index = folio_first_index + nr_pages - 1; + + /* Folios might straddle the range boundaries, only count covered pages */ + if (folio_first_index < first_index) + nr_pages -= first_index - folio_first_index; + + if (folio_last_index > last_index) + nr_pages -= folio_last_index - last_index; + + cs->nr_evicted += nr_pages; + +#ifdef CONFIG_SWAP /* implies CONFIG_MMU */ + if (shmem_mapping(mapping)) { + /* shmem file - in swap cache */ + swp_entry_t swp = radix_to_swp_entry(folio); + + shadow = get_shadow_from_swap_cache(swp); + } +#endif + if (workingset_test_recent(shadow, true, &workingset)) + cs->nr_recently_evicted += nr_pages; + + goto resched; + } + + nr_pages = folio_nr_pages(folio); + folio_first_index = folio_pgoff(folio); + folio_last_index = folio_first_index + nr_pages - 1; + + /* Folios might straddle the range boundaries, only count covered pages */ + if (folio_first_index < first_index) + nr_pages -= first_index - folio_first_index; + + if (folio_last_index > last_index) + nr_pages -= folio_last_index - last_index; + + /* page is in cache */ + cs->nr_cache += nr_pages; + + if (folio_test_dirty(folio)) + cs->nr_dirty += nr_pages; + + if (folio_test_writeback(folio)) + cs->nr_writeback += nr_pages; + +resched: + if (need_resched()) { + xas_pause(&xas); + cond_resched_rcu(); + } + } + rcu_read_unlock(); +} + +/* + * The cachestat(2) system call. + * + * cachestat() returns the page cache statistics of a file in the + * bytes range specified by `off` and `len`: number of cached pages, + * number of dirty pages, number of pages marked for writeback, + * number of evicted pages, and number of recently evicted pages. + * + * An evicted page is a page that is previously in the page cache + * but has been evicted since. A page is recently evicted if its last + * eviction was recent enough that its reentry to the cache would + * indicate that it is actively being used by the system, and that + * there is memory pressure on the system. + * + * `off` and `len` must be non-negative integers. If `len` > 0, + * the queried range is [`off`, `off` + `len`]. If `len` == 0, + * we will query in the range from `off` to the end of the file. + * + * The `flags` argument is unused for now, but is included for future + * extensibility. User should pass 0 (i.e no flag specified). + * + * Currently, hugetlbfs is not supported. + * + * Because the status of a page can change after cachestat() checks it + * but before it returns to the application, the returned values may + * contain stale information. 
+ * + * return values: + * zero - success + * -EFAULT - cstat or cstat_range points to an illegal address + * -EINVAL - invalid flags + * -EBADF - invalid file descriptor + * -EOPNOTSUPP - file descriptor is of a hugetlbfs file + */ +SYSCALL_DEFINE4(cachestat, unsigned int, fd, + struct cachestat_range __user *, cstat_range, + struct cachestat __user *, cstat, unsigned int, flags) +{ + struct fd f = fdget(fd); + struct address_space *mapping; + struct cachestat_range csr; + struct cachestat cs; + pgoff_t first_index, last_index; + + if (!f.file) + return -EBADF; + + if (copy_from_user(&csr, cstat_range, + sizeof(struct cachestat_range))) { + fdput(f); + return -EFAULT; + } + + /* hugetlbfs is not supported */ + if (is_file_hugepages(f.file)) { + fdput(f); + return -EOPNOTSUPP; + } + + if (flags != 0) { + fdput(f); + return -EINVAL; + } + + first_index = csr.off >> PAGE_SHIFT; + last_index = + csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT; + memset(&cs, 0, sizeof(struct cachestat)); + mapping = f.file->f_mapping; + filemap_cachestat(mapping, first_index, last_index, &cs); + fdput(f); + + if (copy_to_user(cstat, &cs, sizeof(struct cachestat))) + return -EFAULT; + + return 0; +} +#endif /* CONFIG_CACHESTAT_SYSCALL */ -- cgit v1.2.3 From 9f297db35667021925b67e27d81463e38445041c Mon Sep 17 00:00:00 2001 From: Keith Busch Date: Thu, 26 Jan 2023 13:51:25 -0800 Subject: dmapool: create/destroy cleanup Set the 'empty' bool directly from the result of the function that determines its value instead of adding additional logic. Link: https://lkml.kernel.org/r/20230126215125.4069751-13-kbusch@meta.com Fixes: 2d55c16c0c54 ("dmapool: create/destroy cleanup") Signed-off-by: Keith Busch Reviewed-by: Christoph Hellwig Cc: Matthew Wilcox Cc: Tony Battersby Signed-off-by: Andrew Morton --- mm/dmapool.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/dmapool.c b/mm/dmapool.c index d2b0f8fc9649..a151a21e571b 100644 --- a/mm/dmapool.c +++ b/mm/dmapool.c @@ -226,7 +226,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, { struct dma_pool *retval; size_t allocation; - bool empty = false; + bool empty; if (!dev) return NULL; @@ -276,8 +276,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev, */ mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); - if (list_empty(&dev->dma_pools)) - empty = true; + empty = list_empty(&dev->dma_pools); list_add(&retval->pools, &dev->dma_pools); mutex_unlock(&pools_lock); if (empty) { @@ -361,7 +360,7 @@ static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags) void dma_pool_destroy(struct dma_pool *pool) { struct dma_page *page, *tmp; - bool empty = false, busy = false; + bool empty, busy = false; if (unlikely(!pool)) return; @@ -369,8 +368,7 @@ void dma_pool_destroy(struct dma_pool *pool) mutex_lock(&pools_reg_lock); mutex_lock(&pools_lock); list_del(&pool->pools); - if (list_empty(&pool->dev->dma_pools)) - empty = true; + empty = list_empty(&pool->dev->dma_pools); mutex_unlock(&pools_lock); if (empty) device_remove_file(pool->dev, &dev_attr_pools); -- cgit v1.2.3 From 311150343e69ed4fe6380d2de74602a2a29df7c2 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Sat, 6 May 2023 15:05:25 +0100 Subject: mm/gup: add missing gup_must_unshare() check to gup_huge_pgd() All other instances of gup_huge_pXd() perform the unshare check, so update the PGD-specific function to do so as well. 
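For reference, the check that the other GUP-fast helpers already perform, and that the hunk below adds to gup_huge_pgd(), has roughly this shape (a condensed sketch of the pattern, not a complete function):

	if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
		/* R/O pin of a possibly shared anonymous page: bail out of the fast path. */
		gup_put_folio(folio, refs, flags);
		return 0;
	}
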
While checking pgd_write() might seem unusual, this function already performs such a check via pgd_access_permitted() so this is in line with the existing implementation. David said: : This change makes unshare handling across all GUP-fast variants : consistent, which is desirable as GUP-fast is complicated enough : already even when consistent. : : This function was the only one I seemed to have missed (or left out and : forgot why -- maybe because it's really dead code for now). The COW : selftest would identify the problem, so far there was no report. : Either the selftest wasn't run on corresponding architectures with that : hugetlb size, or that code is still dead code and unused by : architectures. : : the original commit(s) that added unsharing explain why we care about : these checks: : : a7f226604170acd6 ("mm/gup: trigger FAULT_FLAG_UNSHARE when R/O-pinning a possibly shared anonymous page") : 84209e87c6963f92 ("mm/gup: reliable R/O long-term pinning in COW mappings") Link: https://lkml.kernel.org/r/cb971ac8dd315df97058ea69442ecc007b9a364a.1683381545.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/gup.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index bbe416236593..e19b06a66229 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2755,6 +2755,11 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } + if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { + gup_put_folio(folio, refs, flags); + return 0; + } + *nr += refs; folio_set_referenced(folio); return 1; -- cgit v1.2.3 From c963901197188189e85b4d768a059fe1bbc2a502 Mon Sep 17 00:00:00 2001 From: Pankaj Raghav Date: Wed, 10 May 2023 14:47:16 +0200 Subject: filemap: remove page_endio() page_endio() is not used anymore. Remove it. 
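As the removal diff below shows, the helper was a thin wrapper around existing folio APIs; a caller completing its own read I/O can open-code the same logic along these lines (an illustrative sketch; my_read_end_io() is a hypothetical driver function):

	static void my_read_end_io(struct folio *folio, int err)
	{
		/* Equivalent of the old page_endio(&folio->page, false, err). */
		if (!err) {
			folio_mark_uptodate(folio);
		} else {
			folio_clear_uptodate(folio);
			folio_set_error(folio);
		}
		folio_unlock(folio);
	}
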
Link: https://lkml.kernel.org/r/20230510124716.73655-1-p.raghav@samsung.com Signed-off-by: Pankaj Raghav Reviewed-by: Christoph Hellwig Acked-by: Matthew Wilcox (Oracle) Cc: Luis Chamberlain Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 2 -- mm/filemap.c | 30 ------------------------------ 2 files changed, 32 deletions(-) (limited to 'mm') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index a56308a9d1a4..c1ae5ebc375f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -1078,8 +1078,6 @@ int filemap_migrate_folio(struct address_space *mapping, struct folio *dst, #else #define filemap_migrate_folio NULL #endif -void page_endio(struct page *page, bool is_write, int err); - void folio_end_private_2(struct folio *folio); void folio_wait_private_2(struct folio *folio); int folio_wait_private_2_killable(struct folio *folio); diff --git a/mm/filemap.c b/mm/filemap.c index 2d3d70c64dfd..570bc8c3db87 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1628,36 +1628,6 @@ void folio_end_writeback(struct folio *folio) } EXPORT_SYMBOL(folio_end_writeback); -/* - * After completing I/O on a page, call this routine to update the page - * flags appropriately - */ -void page_endio(struct page *page, bool is_write, int err) -{ - struct folio *folio = page_folio(page); - - if (!is_write) { - if (!err) { - folio_mark_uptodate(folio); - } else { - folio_clear_uptodate(folio); - folio_set_error(folio); - } - folio_unlock(folio); - } else { - if (err) { - struct address_space *mapping; - - folio_set_error(folio); - mapping = folio_mapping(folio); - if (mapping) - mapping_set_error(mapping, err); - } - folio_end_writeback(folio); - } -} -EXPORT_SYMBOL_GPL(page_endio); - /** * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it. 
* @folio: The folio to lock -- cgit v1.2.3
From 501350459b1fe7a0da6d089484fa112ff48f5252 Mon Sep 17 00:00:00 2001 From: Rick Wertenbroek Date: Wed, 10 May 2023 11:07:57 +0200 Subject: mm: memory_hotplug: fix format string in warnings The format string in __add_pages and __remove_pages has a typo and prints e.g., "Misaligned __add_pages start: 0xfc605 end: #fc609" instead of "Misaligned __add_pages start: 0xfc605 end: 0xfc609" Fix "#%lx" => "%#lx" Link: https://lkml.kernel.org/r/20230510090758.3537242-1-rick.wertenbroek@gmail.com Signed-off-by: Rick Wertenbroek Reviewed-by: David Hildenbrand Reviewed-by: Mike Rapoport (IBM) Cc: Oscar Salvador Signed-off-by: Andrew Morton --- mm/memory_hotplug.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8e0fa209d533..9061ac69b1b6 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -325,7 +325,7 @@ int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages, } if (check_pfn_span(pfn, nr_pages)) { - WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); + WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); return -EINVAL; } @@ -525,7 +525,7 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, map_offset = vmem_altmap_offset(altmap); if (check_pfn_span(pfn, nr_pages)) { - WARN(1, "Misaligned %s start: %#lx end: #%lx\n", __func__, pfn, pfn + nr_pages - 1); + WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); return; } -- cgit v1.2.3
From 124abced647306aa3badb5d472c3616de23f180a Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Wed, 10 May 2023 11:18:29 +0800 Subject: migrate_pages_batch: simplify retrying and failure counting of large folios After recent changes to the retrying and failure counting in migrate_pages_batch(), it turns out to be unnecessary to count retries and failures for normal, large, and THP folios separately, because the retry and failure numbers of large folios are never used directly. So, in this patch, retry and failure counting of large folios is simplified by counting the retries and failures of normal and large folios together. This also reduces the line count. Previously, migrate_pages_batch() needed to track whether the source folio was large/THP before splitting, so is_large was used to cache the folio_test_large() result. That variable is no longer needed because retries and failures of large folios are not counted separately (only those of THP folios are), so in this patch is_large is removed to simplify the code. This is just code cleanup; no functional changes are expected.
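The shape of the unified accounting after this cleanup is roughly the following (a condensed sketch of the pattern the hunks below converge on; the surrounding loop and move logic are omitted):

	/* One retry/failure counter covers normal and large folios; THP is tracked on top. */
	is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio);
	nr_pages = folio_nr_pages(folio);

	switch (rc) {
	case -EAGAIN:
		retry++;
		thp_retry += is_thp;
		nr_retry_pages += nr_pages;
		break;
	case MIGRATEPAGE_SUCCESS:
		stats->nr_succeeded += nr_pages;
		stats->nr_thp_succeeded += is_thp;
		break;
	default:
		nr_failed++;
		stats->nr_thp_failed += is_thp;
		stats->nr_failed_pages += nr_pages;
		break;
	}
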
Link: https://lkml.kernel.org/r/20230510031829.11513-1-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Xin Hao Reviewed-by: Zi Yan Reviewed-by: Alistair Popple Cc: Yang Shi Cc: Baolin Wang Cc: Oscar Salvador Signed-off-by: Andrew Morton --- mm/migrate.c | 112 +++++++++++++++++++---------------------------------------- 1 file changed, 36 insertions(+), 76 deletions(-) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index 43d818fd1afd..cb292d2a90ce 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1617,13 +1617,10 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, int nr_pass) { int retry = 1; - int large_retry = 1; int thp_retry = 1; int nr_failed = 0; int nr_retry_pages = 0; - int nr_large_failed = 0; int pass = 0; - bool is_large = false; bool is_thp = false; struct folio *folio, *folio2, *dst = NULL, *dst2; int rc, rc_saved = 0, nr_pages; @@ -1634,20 +1631,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && !list_empty(from) && !list_is_singular(from)); - for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { + for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; - large_retry = 0; thp_retry = 0; nr_retry_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { - /* - * Large folio statistics is based on the source large - * folio. Capture required information that might get - * lost during migration. - */ - is_large = folio_test_large(folio); - is_thp = is_large && folio_test_pmd_mappable(folio); + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); @@ -1663,7 +1653,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * list is processed. */ if (!thp_migration_supported() && is_thp) { - nr_large_failed++; + nr_failed++; stats->nr_thp_failed++; if (!try_split_folio(folio, split_folios)) { stats->nr_thp_split++; @@ -1691,38 +1681,33 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * When memory is low, don't bother to try to migrate * other folios, move unmapped folios, then exit. */ - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - /* Large folio NUMA faulting doesn't split to retry. */ - if (!nosplit) { - int ret = try_split_folio(folio, split_folios); - - if (!ret) { - stats->nr_thp_split += is_thp; - break; - } else if (reason == MR_LONGTERM_PIN && - ret == -EAGAIN) { - /* - * Try again to split large folio to - * mitigate the failure of longterm pinning. - */ - large_retry++; - thp_retry += is_thp; - nr_retry_pages += nr_pages; - /* Undo duplicated failure counting. */ - nr_large_failed--; - stats->nr_thp_failed -= is_thp; - break; - } + nr_failed++; + stats->nr_thp_failed += is_thp; + /* Large folio NUMA faulting doesn't split to retry. */ + if (folio_test_large(folio) && !nosplit) { + int ret = try_split_folio(folio, split_folios); + + if (!ret) { + stats->nr_thp_split += is_thp; + break; + } else if (reason == MR_LONGTERM_PIN && + ret == -EAGAIN) { + /* + * Try again to split large folio to + * mitigate the failure of longterm pinning. + */ + retry++; + thp_retry += is_thp; + nr_retry_pages += nr_pages; + /* Undo duplicated failure counting. 
*/ + nr_failed--; + stats->nr_thp_failed -= is_thp; + break; } - } else { - nr_failed++; } stats->nr_failed_pages += nr_pages + nr_retry_pages; /* nr_failed isn't updated for not used */ - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; rc_saved = rc; if (list_empty(&unmap_folios)) @@ -1730,12 +1715,8 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, else goto move; case -EAGAIN: - if (is_large) { - large_retry++; - thp_retry += is_thp; - } else { - retry++; - } + retry++; + thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: @@ -1753,20 +1734,14 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, * removed from migration folio list and not * retried in the next outer loop. */ - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - } else { - nr_failed++; - } - + nr_failed++; + stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } } } nr_failed += retry; - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; move: @@ -1774,17 +1749,15 @@ move: try_to_unmap_flush(); retry = 1; - for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { + for (pass = 0; pass < nr_pass && retry; pass++) { retry = 0; - large_retry = 0; thp_retry = 0; nr_retry_pages = 0; dst = list_first_entry(&dst_folios, struct folio, lru); dst2 = list_next_entry(dst, lru); list_for_each_entry_safe(folio, folio2, &unmap_folios, lru) { - is_large = folio_test_large(folio); - is_thp = is_large && folio_test_pmd_mappable(folio); + is_thp = folio_test_large(folio) && folio_test_pmd_mappable(folio); nr_pages = folio_nr_pages(folio); cond_resched(); @@ -1800,12 +1773,8 @@ move: */ switch(rc) { case -EAGAIN: - if (is_large) { - large_retry++; - thp_retry += is_thp; - } else { - retry++; - } + retry++; + thp_retry += is_thp; nr_retry_pages += nr_pages; break; case MIGRATEPAGE_SUCCESS: @@ -1813,13 +1782,8 @@ move: stats->nr_thp_succeeded += is_thp; break; default: - if (is_large) { - nr_large_failed++; - stats->nr_thp_failed += is_thp; - } else { - nr_failed++; - } - + nr_failed++; + stats->nr_thp_failed += is_thp; stats->nr_failed_pages += nr_pages; break; } @@ -1828,14 +1792,10 @@ move: } } nr_failed += retry; - nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; stats->nr_failed_pages += nr_retry_pages; - if (rc_saved) - rc = rc_saved; - else - rc = nr_failed + nr_large_failed; + rc = rc_saved ? : nr_failed; out: /* Cleanup remaining folios */ dst = list_first_entry(&dst_folios, struct folio, lru); -- cgit v1.2.3 From fb646a4cd3f0ff27d19911bef7b6622263723df6 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 May 2023 16:57:20 +0200 Subject: kasan: add kasan_tag_mismatch prototype The kasan sw-tags implementation contains one function that is only called from assembler and has no prototype in a header. This causes a W=1 warning: mm/kasan/sw_tags.c:171:6: warning: no previous prototype for 'kasan_tag_mismatch' [-Wmissing-prototypes] 171 | void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, Add a prototype in the local header to get a clean build. 
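The general pattern for silencing -Wmissing-prototypes for a function that is only reached from assembly is simply to declare it in a header included by the defining file; a generic sketch (file and function names are hypothetical, not from this patch):

	/* internal.h */
	void handler_called_from_asm(unsigned long addr, unsigned long info);

	/* handler.c */
	#include "internal.h"

	void handler_called_from_asm(unsigned long addr, unsigned long info)
	{
		/* ... report and return ... */
	}
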
Link: https://lkml.kernel.org/r/20230509145735.9263-1-arnd@kernel.org Signed-off-by: Arnd Bergmann Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- mm/kasan/kasan.h | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index f5e4f5f2ba20..cd846ca34f44 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -646,4 +646,7 @@ void *__hwasan_memset(void *addr, int c, size_t len); void *__hwasan_memmove(void *dest, const void *src, size_t len); void *__hwasan_memcpy(void *dest, const void *src, size_t len); +void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, + unsigned long ret_ip); + #endif /* __MM_KASAN_KASAN_H */ -- cgit v1.2.3 From bb6e04a173f06e51819a4bb512e127dfbc50dcfa Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 9 May 2023 16:57:21 +0200 Subject: kasan: use internal prototypes matching gcc-13 builtins gcc-13 warns about function definitions for builtin interfaces that have a different prototype, e.g.: In file included from kasan_test.c:31: kasan.h:574:6: error: conflicting types for built-in function '__asan_register_globals'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 574 | void __asan_register_globals(struct kasan_global *globals, size_t size); kasan.h:577:6: error: conflicting types for built-in function '__asan_alloca_poison'; expected 'void(void *, long int)' [-Werror=builtin-declaration-mismatch] 577 | void __asan_alloca_poison(unsigned long addr, size_t size); kasan.h:580:6: error: conflicting types for built-in function '__asan_load1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 580 | void __asan_load1(unsigned long addr); kasan.h:581:6: error: conflicting types for built-in function '__asan_store1'; expected 'void(void *)' [-Werror=builtin-declaration-mismatch] 581 | void __asan_store1(unsigned long addr); kasan.h:643:6: error: conflicting types for built-in function '__hwasan_tag_memory'; expected 'void(void *, unsigned char, long int)' [-Werror=builtin-declaration-mismatch] 643 | void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size); The two problems are: - Addresses are passes as 'unsigned long' in the kernel, but gcc-13 expects a 'void *'. - sizes meant to use a signed ssize_t rather than size_t. Change all the prototypes to match these. Using 'void *' consistently for addresses gets rid of a couple of type casts, so push that down to the leaf functions where possible. This now passes all randconfig builds on arm, arm64 and x86, but I have not tested it on the other architectures that support kasan, since they tend to fail randconfig builds in other ways. This might fail if any of the 32-bit architectures expect a 'long' instead of 'int' for the size argument. The __asan_allocas_unpoison() function prototype is somewhat weird, since it uses a pointer for 'stack_top' and an size_t for 'stack_bottom'. This looks like it is meant to be 'addr' and 'size' like the others, but the implementation clearly treats them as 'top' and 'bottom'. 
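Concretely, the conversion amounts to signature changes of this kind throughout the kasan internals (an illustrative before/after pair following the pattern in the hunks below):

	/* Before: addresses passed as unsigned long, sizes as size_t. */
	void __asan_loadN(unsigned long addr, size_t size);

	/* After: addresses as void *, sizes as signed ssize_t, matching the gcc-13 builtin prototypes. */
	void __asan_loadN(void *addr, ssize_t size);
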
Link: https://lkml.kernel.org/r/20230509145735.9263-2-arnd@kernel.org Signed-off-by: Arnd Bergmann Cc: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Vincenzo Frascino Cc: Signed-off-by: Andrew Morton --- arch/arm64/kernel/traps.c | 2 +- arch/arm64/mm/fault.c | 2 +- include/linux/kasan.h | 2 +- mm/kasan/common.c | 2 +- mm/kasan/generic.c | 72 ++++++++++----------- mm/kasan/kasan.h | 160 +++++++++++++++++++++++----------------------- mm/kasan/report.c | 17 +++-- mm/kasan/report_generic.c | 12 ++-- mm/kasan/report_hw_tags.c | 2 +- mm/kasan/report_sw_tags.c | 2 +- mm/kasan/shadow.c | 36 +++++------ mm/kasan/sw_tags.c | 20 +++--- 12 files changed, 164 insertions(+), 165 deletions(-) (limited to 'mm') diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 4bb1b8f47298..7b889445e5c6 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -1044,7 +1044,7 @@ static int kasan_handler(struct pt_regs *regs, unsigned long esr) bool recover = esr & KASAN_ESR_RECOVER; bool write = esr & KASAN_ESR_WRITE; size_t size = KASAN_ESR_SIZE(esr); - u64 addr = regs->regs[0]; + void *addr = (void *)regs->regs[0]; u64 pc = regs->pc; kasan_report(addr, size, write, pc); diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cb21ccd7940d..d5047eef4295 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -317,7 +317,7 @@ static void report_tag_fault(unsigned long addr, unsigned long esr, * find out access size. */ bool is_write = !!(esr & ESR_ELx_WNR); - kasan_report(addr, 0, is_write, regs->pc); + kasan_report((void *)addr, 0, is_write, regs->pc); } #else /* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index f7ef70661ce2..819b6bc8ac08 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -343,7 +343,7 @@ static inline void *kasan_reset_tag(const void *addr) * @is_write: whether the bad access is a write or a read * @ip: instruction pointer for the accessibility check or the bad access itself */ -bool kasan_report(unsigned long addr, size_t size, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip); #else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */ diff --git a/mm/kasan/common.c b/mm/kasan/common.c index b376a5d055e5..256930da578a 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -445,7 +445,7 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag bool __kasan_check_byte(const void *address, unsigned long ip) { if (!kasan_byte_accessible(address)) { - kasan_report((unsigned long)address, 1, false, ip); + kasan_report(address, 1, false, ip); return false; } return true; diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index e5eef670735e..224d161a5a22 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -40,39 +40,39 @@ * depending on memory access size X. 
*/ -static __always_inline bool memory_is_poisoned_1(unsigned long addr) +static __always_inline bool memory_is_poisoned_1(const void *addr) { - s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr); + s8 shadow_value = *(s8 *)kasan_mem_to_shadow(addr); if (unlikely(shadow_value)) { - s8 last_accessible_byte = addr & KASAN_GRANULE_MASK; + s8 last_accessible_byte = (unsigned long)addr & KASAN_GRANULE_MASK; return unlikely(last_accessible_byte >= shadow_value); } return false; } -static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr, +static __always_inline bool memory_is_poisoned_2_4_8(const void *addr, unsigned long size) { - u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr); + u8 *shadow_addr = (u8 *)kasan_mem_to_shadow(addr); /* * Access crosses 8(shadow size)-byte boundary. Such access maps * into 2 shadow bytes, so we need to check them both. */ - if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1)) + if (unlikely((((unsigned long)addr + size - 1) & KASAN_GRANULE_MASK) < size - 1)) return *shadow_addr || memory_is_poisoned_1(addr + size - 1); return memory_is_poisoned_1(addr + size - 1); } -static __always_inline bool memory_is_poisoned_16(unsigned long addr) +static __always_inline bool memory_is_poisoned_16(const void *addr) { - u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr); + u16 *shadow_addr = (u16 *)kasan_mem_to_shadow(addr); /* Unaligned 16-bytes access maps into 3 shadow bytes. */ - if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE))) + if (unlikely(!IS_ALIGNED((unsigned long)addr, KASAN_GRANULE_SIZE))) return *shadow_addr || memory_is_poisoned_1(addr + 15); return *shadow_addr; @@ -120,26 +120,25 @@ static __always_inline unsigned long memory_is_nonzero(const void *start, return bytes_is_nonzero(start, (end - start) % 8); } -static __always_inline bool memory_is_poisoned_n(unsigned long addr, - size_t size) +static __always_inline bool memory_is_poisoned_n(const void *addr, size_t size) { unsigned long ret; - ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr), - kasan_mem_to_shadow((void *)addr + size - 1) + 1); + ret = memory_is_nonzero(kasan_mem_to_shadow(addr), + kasan_mem_to_shadow(addr + size - 1) + 1); if (unlikely(ret)) { - unsigned long last_byte = addr + size - 1; - s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte); + const void *last_byte = addr + size - 1; + s8 *last_shadow = (s8 *)kasan_mem_to_shadow(last_byte); if (unlikely(ret != (unsigned long)last_shadow || - ((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow))) + (((long)last_byte & KASAN_GRANULE_MASK) >= *last_shadow))) return true; } return false; } -static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) +static __always_inline bool memory_is_poisoned(const void *addr, size_t size) { if (__builtin_constant_p(size)) { switch (size) { @@ -159,7 +158,7 @@ static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size) return memory_is_poisoned_n(addr, size); } -static __always_inline bool check_region_inline(unsigned long addr, +static __always_inline bool check_region_inline(const void *addr, size_t size, bool write, unsigned long ret_ip) { @@ -172,7 +171,7 @@ static __always_inline bool check_region_inline(unsigned long addr, if (unlikely(addr + size < addr)) return !kasan_report(addr, size, write, ret_ip); - if (unlikely(!addr_has_metadata((void *)addr))) + if (unlikely(!addr_has_metadata(addr))) return !kasan_report(addr, size, write, ret_ip); if (likely(!memory_is_poisoned(addr, 
size))) @@ -181,7 +180,7 @@ static __always_inline bool check_region_inline(unsigned long addr, return !kasan_report(addr, size, write, ret_ip); } -bool kasan_check_range(unsigned long addr, size_t size, bool write, +bool kasan_check_range(const void *addr, size_t size, bool write, unsigned long ret_ip) { return check_region_inline(addr, size, write, ret_ip); @@ -221,36 +220,37 @@ static void register_global(struct kasan_global *global) KASAN_GLOBAL_REDZONE, false); } -void __asan_register_globals(struct kasan_global *globals, size_t size) +void __asan_register_globals(void *ptr, ssize_t size) { int i; + struct kasan_global *globals = ptr; for (i = 0; i < size; i++) register_global(&globals[i]); } EXPORT_SYMBOL(__asan_register_globals); -void __asan_unregister_globals(struct kasan_global *globals, size_t size) +void __asan_unregister_globals(void *ptr, ssize_t size) { } EXPORT_SYMBOL(__asan_unregister_globals); #define DEFINE_ASAN_LOAD_STORE(size) \ - void __asan_load##size(unsigned long addr) \ + void __asan_load##size(void *addr) \ { \ check_region_inline(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_load##size); \ __alias(__asan_load##size) \ - void __asan_load##size##_noabort(unsigned long); \ + void __asan_load##size##_noabort(void *); \ EXPORT_SYMBOL(__asan_load##size##_noabort); \ - void __asan_store##size(unsigned long addr) \ + void __asan_store##size(void *addr) \ { \ check_region_inline(addr, size, true, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_store##size); \ __alias(__asan_store##size) \ - void __asan_store##size##_noabort(unsigned long); \ + void __asan_store##size##_noabort(void *); \ EXPORT_SYMBOL(__asan_store##size##_noabort) DEFINE_ASAN_LOAD_STORE(1); @@ -259,24 +259,24 @@ DEFINE_ASAN_LOAD_STORE(4); DEFINE_ASAN_LOAD_STORE(8); DEFINE_ASAN_LOAD_STORE(16); -void __asan_loadN(unsigned long addr, size_t size) +void __asan_loadN(void *addr, ssize_t size) { kasan_check_range(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__asan_loadN); __alias(__asan_loadN) -void __asan_loadN_noabort(unsigned long, size_t); +void __asan_loadN_noabort(void *, ssize_t); EXPORT_SYMBOL(__asan_loadN_noabort); -void __asan_storeN(unsigned long addr, size_t size) +void __asan_storeN(void *addr, ssize_t size) { kasan_check_range(addr, size, true, _RET_IP_); } EXPORT_SYMBOL(__asan_storeN); __alias(__asan_storeN) -void __asan_storeN_noabort(unsigned long, size_t); +void __asan_storeN_noabort(void *, ssize_t); EXPORT_SYMBOL(__asan_storeN_noabort); /* to shut up compiler complaints */ @@ -284,7 +284,7 @@ void __asan_handle_no_return(void) {} EXPORT_SYMBOL(__asan_handle_no_return); /* Emitted by compiler to poison alloca()ed objects. */ -void __asan_alloca_poison(unsigned long addr, size_t size) +void __asan_alloca_poison(void *addr, ssize_t size) { size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE); size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) - @@ -295,7 +295,7 @@ void __asan_alloca_poison(unsigned long addr, size_t size) KASAN_ALLOCA_REDZONE_SIZE); const void *right_redzone = (const void *)(addr + rounded_up_size); - WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE)); + WARN_ON(!IS_ALIGNED((unsigned long)addr, KASAN_ALLOCA_REDZONE_SIZE)); kasan_unpoison((const void *)(addr + rounded_down_size), size - rounded_down_size, false); @@ -307,18 +307,18 @@ void __asan_alloca_poison(unsigned long addr, size_t size) EXPORT_SYMBOL(__asan_alloca_poison); /* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. 
*/ -void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom) +void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom) { - if (unlikely(!stack_top || stack_top > stack_bottom)) + if (unlikely(!stack_top || stack_top > (void *)stack_bottom)) return; - kasan_unpoison(stack_top, stack_bottom - stack_top, false); + kasan_unpoison(stack_top, (void *)stack_bottom - stack_top, false); } EXPORT_SYMBOL(__asan_allocas_unpoison); /* Emitted by the compiler to [un]poison local variables. */ #define DEFINE_ASAN_SET_SHADOW(byte) \ - void __asan_set_shadow_##byte(const void *addr, size_t size) \ + void __asan_set_shadow_##byte(const void *addr, ssize_t size) \ { \ __memset((void *)addr, 0x##byte, size); \ } \ diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index cd846ca34f44..b799f11e45dc 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -198,13 +198,13 @@ enum kasan_report_type { struct kasan_report_info { /* Filled in by kasan_report_*(). */ enum kasan_report_type type; - void *access_addr; + const void *access_addr; size_t access_size; bool is_write; unsigned long ip; /* Filled in by the common reporting code. */ - void *first_bad_addr; + const void *first_bad_addr; struct kmem_cache *cache; void *object; size_t alloc_size; @@ -311,7 +311,7 @@ static __always_inline bool addr_has_metadata(const void *addr) * @ret_ip: return address * @return: true if access was valid, false if invalid */ -bool kasan_check_range(unsigned long addr, size_t size, bool write, +bool kasan_check_range(const void *addr, size_t size, bool write, unsigned long ret_ip); #else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ @@ -323,7 +323,7 @@ static __always_inline bool addr_has_metadata(const void *addr) #endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */ -void *kasan_find_first_bad_addr(void *addr, size_t size); +const void *kasan_find_first_bad_addr(const void *addr, size_t size); size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache); void kasan_complete_mode_report_info(struct kasan_report_info *info); void kasan_metadata_fetch_row(char *buffer, void *row); @@ -346,7 +346,7 @@ void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object); static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { } #endif -bool kasan_report(unsigned long addr, size_t size, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip); void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type); @@ -571,82 +571,82 @@ void kasan_restore_multi_shot(bool enabled); */ asmlinkage void kasan_unpoison_task_stack_below(const void *watermark); -void __asan_register_globals(struct kasan_global *globals, size_t size); -void __asan_unregister_globals(struct kasan_global *globals, size_t size); +void __asan_register_globals(void *globals, ssize_t size); +void __asan_unregister_globals(void *globals, ssize_t size); void __asan_handle_no_return(void); -void __asan_alloca_poison(unsigned long addr, size_t size); -void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom); - -void __asan_load1(unsigned long addr); -void __asan_store1(unsigned long addr); -void __asan_load2(unsigned long addr); -void __asan_store2(unsigned long addr); -void __asan_load4(unsigned long addr); -void __asan_store4(unsigned long addr); -void __asan_load8(unsigned long addr); -void __asan_store8(unsigned long addr); -void __asan_load16(unsigned long addr); -void __asan_store16(unsigned long addr); -void 
__asan_loadN(unsigned long addr, size_t size); -void __asan_storeN(unsigned long addr, size_t size); - -void __asan_load1_noabort(unsigned long addr); -void __asan_store1_noabort(unsigned long addr); -void __asan_load2_noabort(unsigned long addr); -void __asan_store2_noabort(unsigned long addr); -void __asan_load4_noabort(unsigned long addr); -void __asan_store4_noabort(unsigned long addr); -void __asan_load8_noabort(unsigned long addr); -void __asan_store8_noabort(unsigned long addr); -void __asan_load16_noabort(unsigned long addr); -void __asan_store16_noabort(unsigned long addr); -void __asan_loadN_noabort(unsigned long addr, size_t size); -void __asan_storeN_noabort(unsigned long addr, size_t size); - -void __asan_report_load1_noabort(unsigned long addr); -void __asan_report_store1_noabort(unsigned long addr); -void __asan_report_load2_noabort(unsigned long addr); -void __asan_report_store2_noabort(unsigned long addr); -void __asan_report_load4_noabort(unsigned long addr); -void __asan_report_store4_noabort(unsigned long addr); -void __asan_report_load8_noabort(unsigned long addr); -void __asan_report_store8_noabort(unsigned long addr); -void __asan_report_load16_noabort(unsigned long addr); -void __asan_report_store16_noabort(unsigned long addr); -void __asan_report_load_n_noabort(unsigned long addr, size_t size); -void __asan_report_store_n_noabort(unsigned long addr, size_t size); - -void __asan_set_shadow_00(const void *addr, size_t size); -void __asan_set_shadow_f1(const void *addr, size_t size); -void __asan_set_shadow_f2(const void *addr, size_t size); -void __asan_set_shadow_f3(const void *addr, size_t size); -void __asan_set_shadow_f5(const void *addr, size_t size); -void __asan_set_shadow_f8(const void *addr, size_t size); - -void *__asan_memset(void *addr, int c, size_t len); -void *__asan_memmove(void *dest, const void *src, size_t len); -void *__asan_memcpy(void *dest, const void *src, size_t len); - -void __hwasan_load1_noabort(unsigned long addr); -void __hwasan_store1_noabort(unsigned long addr); -void __hwasan_load2_noabort(unsigned long addr); -void __hwasan_store2_noabort(unsigned long addr); -void __hwasan_load4_noabort(unsigned long addr); -void __hwasan_store4_noabort(unsigned long addr); -void __hwasan_load8_noabort(unsigned long addr); -void __hwasan_store8_noabort(unsigned long addr); -void __hwasan_load16_noabort(unsigned long addr); -void __hwasan_store16_noabort(unsigned long addr); -void __hwasan_loadN_noabort(unsigned long addr, size_t size); -void __hwasan_storeN_noabort(unsigned long addr, size_t size); - -void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size); - -void *__hwasan_memset(void *addr, int c, size_t len); -void *__hwasan_memmove(void *dest, const void *src, size_t len); -void *__hwasan_memcpy(void *dest, const void *src, size_t len); - -void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, +void __asan_alloca_poison(void *, ssize_t size); +void __asan_allocas_unpoison(void *stack_top, ssize_t stack_bottom); + +void __asan_load1(void *); +void __asan_store1(void *); +void __asan_load2(void *); +void __asan_store2(void *); +void __asan_load4(void *); +void __asan_store4(void *); +void __asan_load8(void *); +void __asan_store8(void *); +void __asan_load16(void *); +void __asan_store16(void *); +void __asan_loadN(void *, ssize_t size); +void __asan_storeN(void *, ssize_t size); + +void __asan_load1_noabort(void *); +void __asan_store1_noabort(void *); +void __asan_load2_noabort(void *); +void 
__asan_store2_noabort(void *); +void __asan_load4_noabort(void *); +void __asan_store4_noabort(void *); +void __asan_load8_noabort(void *); +void __asan_store8_noabort(void *); +void __asan_load16_noabort(void *); +void __asan_store16_noabort(void *); +void __asan_loadN_noabort(void *, ssize_t size); +void __asan_storeN_noabort(void *, ssize_t size); + +void __asan_report_load1_noabort(void *); +void __asan_report_store1_noabort(void *); +void __asan_report_load2_noabort(void *); +void __asan_report_store2_noabort(void *); +void __asan_report_load4_noabort(void *); +void __asan_report_store4_noabort(void *); +void __asan_report_load8_noabort(void *); +void __asan_report_store8_noabort(void *); +void __asan_report_load16_noabort(void *); +void __asan_report_store16_noabort(void *); +void __asan_report_load_n_noabort(void *, ssize_t size); +void __asan_report_store_n_noabort(void *, ssize_t size); + +void __asan_set_shadow_00(const void *addr, ssize_t size); +void __asan_set_shadow_f1(const void *addr, ssize_t size); +void __asan_set_shadow_f2(const void *addr, ssize_t size); +void __asan_set_shadow_f3(const void *addr, ssize_t size); +void __asan_set_shadow_f5(const void *addr, ssize_t size); +void __asan_set_shadow_f8(const void *addr, ssize_t size); + +void *__asan_memset(void *addr, int c, ssize_t len); +void *__asan_memmove(void *dest, const void *src, ssize_t len); +void *__asan_memcpy(void *dest, const void *src, ssize_t len); + +void __hwasan_load1_noabort(void *); +void __hwasan_store1_noabort(void *); +void __hwasan_load2_noabort(void *); +void __hwasan_store2_noabort(void *); +void __hwasan_load4_noabort(void *); +void __hwasan_store4_noabort(void *); +void __hwasan_load8_noabort(void *); +void __hwasan_store8_noabort(void *); +void __hwasan_load16_noabort(void *); +void __hwasan_store16_noabort(void *); +void __hwasan_loadN_noabort(void *, ssize_t size); +void __hwasan_storeN_noabort(void *, ssize_t size); + +void __hwasan_tag_memory(void *, u8 tag, ssize_t size); + +void *__hwasan_memset(void *addr, int c, ssize_t len); +void *__hwasan_memmove(void *dest, const void *src, ssize_t len); +void *__hwasan_memcpy(void *dest, const void *src, ssize_t len); + +void kasan_tag_mismatch(void *addr, unsigned long access_info, unsigned long ret_ip); #endif /* __MM_KASAN_KASAN_H */ diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 892a9dc9d4d3..84d9f3b37014 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -211,7 +211,7 @@ static void start_report(unsigned long *flags, bool sync) pr_err("==================================================================\n"); } -static void end_report(unsigned long *flags, void *addr) +static void end_report(unsigned long *flags, const void *addr) { if (addr) trace_error_report_end(ERROR_DETECTOR_KASAN, @@ -450,8 +450,8 @@ static void print_memory_metadata(const void *addr) static void print_report(struct kasan_report_info *info) { - void *addr = kasan_reset_tag(info->access_addr); - u8 tag = get_tag(info->access_addr); + void *addr = kasan_reset_tag((void *)info->access_addr); + u8 tag = get_tag((void *)info->access_addr); print_error_description(info); if (addr_has_metadata(addr)) @@ -468,12 +468,12 @@ static void print_report(struct kasan_report_info *info) static void complete_report_info(struct kasan_report_info *info) { - void *addr = kasan_reset_tag(info->access_addr); + void *addr = kasan_reset_tag((void *)info->access_addr); struct slab *slab; if (info->type == KASAN_REPORT_ACCESS) info->first_bad_addr = kasan_find_first_bad_addr( - 
info->access_addr, info->access_size); + (void *)info->access_addr, info->access_size); else info->first_bad_addr = addr; @@ -544,11 +544,10 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty * user_access_save/restore(): kasan_report_invalid_free() cannot be called * from a UACCESS region, and kasan_report_async() is not used on x86. */ -bool kasan_report(unsigned long addr, size_t size, bool is_write, +bool kasan_report(const void *addr, size_t size, bool is_write, unsigned long ip) { bool ret = true; - void *ptr = (void *)addr; unsigned long ua_flags = user_access_save(); unsigned long irq_flags; struct kasan_report_info info; @@ -562,7 +561,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write, memset(&info, 0, sizeof(info)); info.type = KASAN_REPORT_ACCESS; - info.access_addr = ptr; + info.access_addr = addr; info.access_size = size; info.is_write = is_write; info.ip = ip; @@ -571,7 +570,7 @@ bool kasan_report(unsigned long addr, size_t size, bool is_write, print_report(&info); - end_report(&irq_flags, ptr); + end_report(&irq_flags, (void *)addr); out: user_access_restore(ua_flags); diff --git a/mm/kasan/report_generic.c b/mm/kasan/report_generic.c index 87d39bc0a673..51a1e8a8877f 100644 --- a/mm/kasan/report_generic.c +++ b/mm/kasan/report_generic.c @@ -30,9 +30,9 @@ #include "kasan.h" #include "../slab.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { - void *p = addr; + const void *p = addr; if (!addr_has_metadata(p)) return p; @@ -362,14 +362,14 @@ void kasan_print_address_stack_frame(const void *addr) #endif /* CONFIG_KASAN_STACK */ #define DEFINE_ASAN_REPORT_LOAD(size) \ -void __asan_report_load##size##_noabort(unsigned long addr) \ +void __asan_report_load##size##_noabort(void *addr) \ { \ kasan_report(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__asan_report_load##size##_noabort) #define DEFINE_ASAN_REPORT_STORE(size) \ -void __asan_report_store##size##_noabort(unsigned long addr) \ +void __asan_report_store##size##_noabort(void *addr) \ { \ kasan_report(addr, size, true, _RET_IP_); \ } \ @@ -386,13 +386,13 @@ DEFINE_ASAN_REPORT_STORE(4); DEFINE_ASAN_REPORT_STORE(8); DEFINE_ASAN_REPORT_STORE(16); -void __asan_report_load_n_noabort(unsigned long addr, size_t size) +void __asan_report_load_n_noabort(void *addr, ssize_t size) { kasan_report(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__asan_report_load_n_noabort); -void __asan_report_store_n_noabort(unsigned long addr, size_t size) +void __asan_report_store_n_noabort(void *addr, ssize_t size) { kasan_report(addr, size, true, _RET_IP_); } diff --git a/mm/kasan/report_hw_tags.c b/mm/kasan/report_hw_tags.c index 32e80f78de7d..065e1b2fc484 100644 --- a/mm/kasan/report_hw_tags.c +++ b/mm/kasan/report_hw_tags.c @@ -15,7 +15,7 @@ #include "kasan.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { /* * Hardware Tag-Based KASAN only calls this function for normal memory diff --git a/mm/kasan/report_sw_tags.c b/mm/kasan/report_sw_tags.c index 8b1f5a73ee6d..689e94f9fe3c 100644 --- a/mm/kasan/report_sw_tags.c +++ b/mm/kasan/report_sw_tags.c @@ -30,7 +30,7 @@ #include "kasan.h" #include "../slab.h" -void *kasan_find_first_bad_addr(void *addr, size_t size) +const void *kasan_find_first_bad_addr(const void *addr, size_t size) { u8 tag = get_tag(addr); void *p = kasan_reset_tag(addr); diff --git a/mm/kasan/shadow.c 
b/mm/kasan/shadow.c index c8b86f3273b5..3e62728ae25d 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -28,13 +28,13 @@ bool __kasan_check_read(const volatile void *p, unsigned int size) { - return kasan_check_range((unsigned long)p, size, false, _RET_IP_); + return kasan_check_range((void *)p, size, false, _RET_IP_); } EXPORT_SYMBOL(__kasan_check_read); bool __kasan_check_write(const volatile void *p, unsigned int size) { - return kasan_check_range((unsigned long)p, size, true, _RET_IP_); + return kasan_check_range((void *)p, size, true, _RET_IP_); } EXPORT_SYMBOL(__kasan_check_write); @@ -50,7 +50,7 @@ EXPORT_SYMBOL(__kasan_check_write); #undef memset void *memset(void *addr, int c, size_t len) { - if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_)) + if (!kasan_check_range(addr, len, true, _RET_IP_)) return NULL; return __memset(addr, c, len); @@ -60,8 +60,8 @@ void *memset(void *addr, int c, size_t len) #undef memmove void *memmove(void *dest, const void *src, size_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memmove(dest, src, len); @@ -71,17 +71,17 @@ void *memmove(void *dest, const void *src, size_t len) #undef memcpy void *memcpy(void *dest, const void *src, size_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memcpy(dest, src, len); } #endif -void *__asan_memset(void *addr, int c, size_t len) +void *__asan_memset(void *addr, int c, ssize_t len) { - if (!kasan_check_range((unsigned long)addr, len, true, _RET_IP_)) + if (!kasan_check_range(addr, len, true, _RET_IP_)) return NULL; return __memset(addr, c, len); @@ -89,10 +89,10 @@ void *__asan_memset(void *addr, int c, size_t len) EXPORT_SYMBOL(__asan_memset); #ifdef __HAVE_ARCH_MEMMOVE -void *__asan_memmove(void *dest, const void *src, size_t len) +void *__asan_memmove(void *dest, const void *src, ssize_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memmove(dest, src, len); @@ -100,10 +100,10 @@ void *__asan_memmove(void *dest, const void *src, size_t len) EXPORT_SYMBOL(__asan_memmove); #endif -void *__asan_memcpy(void *dest, const void *src, size_t len) +void *__asan_memcpy(void *dest, const void *src, ssize_t len) { - if (!kasan_check_range((unsigned long)src, len, false, _RET_IP_) || - !kasan_check_range((unsigned long)dest, len, true, _RET_IP_)) + if (!kasan_check_range(src, len, false, _RET_IP_) || + !kasan_check_range(dest, len, true, _RET_IP_)) return NULL; return __memcpy(dest, src, len); @@ -111,13 +111,13 @@ void *__asan_memcpy(void *dest, const void *src, size_t len) EXPORT_SYMBOL(__asan_memcpy); #ifdef CONFIG_KASAN_SW_TAGS -void *__hwasan_memset(void *addr, int c, size_t len) __alias(__asan_memset); +void *__hwasan_memset(void *addr, int c, ssize_t len) __alias(__asan_memset); EXPORT_SYMBOL(__hwasan_memset); #ifdef __HAVE_ARCH_MEMMOVE -void *__hwasan_memmove(void *dest, const void *src, size_t len) __alias(__asan_memmove); +void 
*__hwasan_memmove(void *dest, const void *src, ssize_t len) __alias(__asan_memmove); EXPORT_SYMBOL(__hwasan_memmove); #endif -void *__hwasan_memcpy(void *dest, const void *src, size_t len) __alias(__asan_memcpy); +void *__hwasan_memcpy(void *dest, const void *src, ssize_t len) __alias(__asan_memcpy); EXPORT_SYMBOL(__hwasan_memcpy); #endif diff --git a/mm/kasan/sw_tags.c b/mm/kasan/sw_tags.c index 30da65fa02a1..220b5d4c6876 100644 --- a/mm/kasan/sw_tags.c +++ b/mm/kasan/sw_tags.c @@ -70,8 +70,8 @@ u8 kasan_random_tag(void) return (u8)(state % (KASAN_TAG_MAX + 1)); } -bool kasan_check_range(unsigned long addr, size_t size, bool write, - unsigned long ret_ip) +bool kasan_check_range(const void *addr, size_t size, bool write, + unsigned long ret_ip) { u8 tag; u8 *shadow_first, *shadow_last, *shadow; @@ -133,12 +133,12 @@ bool kasan_byte_accessible(const void *addr) } #define DEFINE_HWASAN_LOAD_STORE(size) \ - void __hwasan_load##size##_noabort(unsigned long addr) \ + void __hwasan_load##size##_noabort(void *addr) \ { \ - kasan_check_range(addr, size, false, _RET_IP_); \ + kasan_check_range(addr, size, false, _RET_IP_); \ } \ EXPORT_SYMBOL(__hwasan_load##size##_noabort); \ - void __hwasan_store##size##_noabort(unsigned long addr) \ + void __hwasan_store##size##_noabort(void *addr) \ { \ kasan_check_range(addr, size, true, _RET_IP_); \ } \ @@ -150,25 +150,25 @@ DEFINE_HWASAN_LOAD_STORE(4); DEFINE_HWASAN_LOAD_STORE(8); DEFINE_HWASAN_LOAD_STORE(16); -void __hwasan_loadN_noabort(unsigned long addr, unsigned long size) +void __hwasan_loadN_noabort(void *addr, ssize_t size) { kasan_check_range(addr, size, false, _RET_IP_); } EXPORT_SYMBOL(__hwasan_loadN_noabort); -void __hwasan_storeN_noabort(unsigned long addr, unsigned long size) +void __hwasan_storeN_noabort(void *addr, ssize_t size) { kasan_check_range(addr, size, true, _RET_IP_); } EXPORT_SYMBOL(__hwasan_storeN_noabort); -void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size) +void __hwasan_tag_memory(void *addr, u8 tag, ssize_t size) { - kasan_poison((void *)addr, size, tag, false); + kasan_poison(addr, size, tag, false); } EXPORT_SYMBOL(__hwasan_tag_memory); -void kasan_tag_mismatch(unsigned long addr, unsigned long access_info, +void kasan_tag_mismatch(void *addr, unsigned long access_info, unsigned long ret_ip) { kasan_report(addr, 1 << (access_info & 0xf), access_info & 0x10, -- cgit v1.2.3 From eb83f6528b563550e9ba92f9accfd453ca28e828 Mon Sep 17 00:00:00 2001 From: Pasha Tatashin Date: Mon, 8 May 2023 23:40:59 +0000 Subject: mm: hugetlb_vmemmap: provide stronger vmemmap allocation guarantees HugeTLB pages have a struct page optimizations where struct pages for tail pages are freed. However, when HugeTLB pages are destroyed, the memory for struct pages (vmemmap) need to be allocated again. Currently, __GFP_NORETRY flag is used to allocate the memory for vmemmap, but given that this flag makes very little effort to actually reclaim memory the returning of huge pages back to the system can be problem. Lets use __GFP_RETRY_MAYFAIL instead. This flag is also performs graceful reclaim without causing ooms, but at least it may perform a few retries, and will fail only when there is genuinely little amount of unused memory in the system. Freeing a 1G page requires 16M of free memory. A machine might need to be reconfigured from one task to another, and release a large number of 1G pages back to the system if allocating 16M fails, the release won't work. 
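The allocation policy after this change boils down to the following (a condensed sketch; the hypothetical helper stands in for the per-page allocation step of alloc_vmemmap_page_list() shown in the diff below):

	static struct page *alloc_one_vmemmap_page(int nid)
	{
		/*
		 * __GFP_RETRY_MAYFAIL: reclaim and retry a few times, but fail
		 * gracefully instead of triggering the OOM killer.
		 */
		const gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE;

		return alloc_pages_node(nid, gfp_mask, 0);
	}
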
Link: https://lkml.kernel.org/r/20230508234059.2529638-1-pasha.tatashin@soleen.com Signed-off-by: Pasha Tatashin Suggested-by: David Rientjes Reviewed-by: Mike Kravetz Cc: Michal Hocko Cc: Muchun Song Signed-off-by: Andrew Morton --- mm/hugetlb_vmemmap.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index 27f001e0f0a2..f42079b73f82 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -384,8 +384,9 @@ static int vmemmap_remap_free(unsigned long start, unsigned long end, } static int alloc_vmemmap_page_list(unsigned long start, unsigned long end, - gfp_t gfp_mask, struct list_head *list) + struct list_head *list) { + gfp_t gfp_mask = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_THISNODE; unsigned long nr_pages = (end - start) >> PAGE_SHIFT; int nid = page_to_nid((struct page *)start); struct page *page, *next; @@ -413,12 +414,11 @@ out: * @end: end address of the vmemmap virtual address range that we want to * remap. * @reuse: reuse address. - * @gfp_mask: GFP flag for allocating vmemmap pages. * * Return: %0 on success, negative error code otherwise. */ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, - unsigned long reuse, gfp_t gfp_mask) + unsigned long reuse) { LIST_HEAD(vmemmap_pages); struct vmemmap_remap_walk walk = { @@ -430,7 +430,7 @@ static int vmemmap_remap_alloc(unsigned long start, unsigned long end, /* See the comment in the vmemmap_remap_free(). */ BUG_ON(start - reuse != PAGE_SIZE); - if (alloc_vmemmap_page_list(start, end, gfp_mask, &vmemmap_pages)) + if (alloc_vmemmap_page_list(start, end, &vmemmap_pages)) return -ENOMEM; mmap_read_lock(&init_mm); @@ -476,8 +476,7 @@ int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head) * When a HugeTLB page is freed to the buddy allocator, previously * discarded vmemmap pages must be allocated and remapping. */ - ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse, - GFP_KERNEL | __GFP_NORETRY | __GFP_THISNODE); + ret = vmemmap_remap_alloc(vmemmap_start, vmemmap_end, vmemmap_reuse); if (!ret) { ClearHPageVmemmapOptimized(head); static_branch_dec(&hugetlb_optimize_vmemmap_key); -- cgit v1.2.3 From 97de10a9932c363a4e4ee9bb5f2297e254fb1413 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 8 May 2023 19:41:28 +0800 Subject: mm: memory-failure: move sysctl register in memory_failure_init() There is already a memory_failure_init(), don't add a new initcall, move register_sysctl_init() into it to cleanup a bit. 
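In outline, the consolidation looks like the following simplified sketch (not the exact kernel code; the elided work-queue setup and the complete change are in the diff below):

static int __init memory_failure_init(void)
{
	/* ... existing per-CPU memory_failure_work initialisation elided ... */

	/* Register /proc/sys/vm/memory_failure_* from the same initcall. */
	register_sysctl_init("vm", memory_failure_table);

	return 0;
}
core_initcall(memory_failure_init);

This avoids keeping a second late_initcall whose only job was the sysctl registration.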
Link: https://lkml.kernel.org/r/20230508114128.37081-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Naoya Horiguchi Signed-off-by: Andrew Morton --- mm/memory-failure.c | 11 ++--------- 1 file changed, 2 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 5b663eca1f29..004a02f44271 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -123,7 +123,6 @@ const struct attribute_group memory_failure_attr_group = { .attrs = memory_failure_attr, }; -#ifdef CONFIG_SYSCTL static struct ctl_table memory_failure_table[] = { { .procname = "memory_failure_early_kill", @@ -146,14 +145,6 @@ static struct ctl_table memory_failure_table[] = { { } }; -static int __init memory_failure_sysctl_init(void) -{ - register_sysctl_init("vm", memory_failure_table); - return 0; -} -late_initcall(memory_failure_sysctl_init); -#endif /* CONFIG_SYSCTL */ - /* * Return values: * 1: the page is dissolved (if needed) and taken off from buddy, @@ -2441,6 +2432,8 @@ static int __init memory_failure_init(void) INIT_WORK(&mf_cpu->work, memory_failure_work_func); } + register_sysctl_init("vm", memory_failure_table); + return 0; } core_initcall(memory_failure_init); -- cgit v1.2.3 From 4822acb1369637938c1252d534d3356c5e313cde Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 8 May 2023 07:35:38 +0000 Subject: mm, oom: do not check 0 mask in out_of_memory() Since commit 60e2793d440a ("mm, oom: do not trigger out_of_memory from the #PF"), no user sets gfp_mask to 0. Remove the 0 mask check and update the comments. Link: https://lkml.kernel.org/r/20230508073538.1168-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/oom_kill.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 044e1eed720e..612b5597d3af 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -1130,12 +1130,10 @@ bool out_of_memory(struct oom_control *oc) /* * The OOM killer does not compensate for IO-less reclaim. - * pagefault_out_of_memory lost its gfp context so we have to - * make sure exclude 0 mask - all other users should have at least - * ___GFP_DIRECT_RECLAIM to get here. But mem_cgroup_oom() has to - * invoke the OOM killer even if it is a GFP_NOFS allocation. + * But mem_cgroup_oom() has to invoke the OOM killer even + * if it is a GFP_NOFS allocation. */ - if (oc->gfp_mask && !(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) + if (!(oc->gfp_mask & __GFP_FS) && !is_memcg_oom(oc)) return true; /* -- cgit v1.2.3 From 539aa041a9b1d98cd847b949ba8f91857c995f26 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:41 +0100 Subject: mm: compaction: ensure rescanning only happens on partially scanned pageblocks Patch series "Follow-up "Fix excessive CPU usage during compaction"". The series "Fix excessive CPU usage during compaction" [1] attempted to fix a bug [2] but Vlastimil noted that the fix was incomplete [3]. While the series was merged, fast_find_migrateblock was still disabled. This series should fix the corner cases and allow 95e7a450b819 ("Revert "mm/compaction: fix set skip in fast_find_migrateblock"") to be safely reverted. Details on how many pageblocks are rescanned are in the changelog of the last patch. 
"Raghavendra K T" tested this and reported "decent improvement from perf perspective as well as compaction related data [4] [1] https://lore.kernel.org/r/20230125134434.18017-1-mgorman@techsingularity.net [2] https://bugzilla.suse.com/show_bug.cgi?id=1206848 [3] https://lore.kernel.org/r/a55cf026-a2f9-ef01-9a4c-398693e048ea@suse.cz [4] https://lkml.kernel.org/r/6d62686f-964d-342c-e085-0eae2555cc54@amd.com This patch (of 4): compact_zone() intends to rescan pageblocks if there is a failure to migrate "within the current order-aligned block". However, the pageblock scan may already be complete and moved to the next block causing the next pageblock to be "rescanned". Ensure only the most recent pageblock is rescanned. Link: https://lkml.kernel.org/r/20230515113344.6869-1-mgorman@techsingularity.net Link: https://lkml.kernel.org/r/20230515113344.6869-2-mgorman@techsingularity.net Signed-off-by: Mel Gorman Reported-by: Vlastimil Babka Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Mel Gorman Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 5584fa5fa3d4..d16b0fcd6db5 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2465,8 +2465,9 @@ rescan: * fast_find_migrateblock revisiting blocks that were * recently partially scanned. */ - if (cc->direct_compaction && !cc->finish_pageblock && - (cc->mode < MIGRATE_SYNC)) { + if (!pageblock_aligned(cc->migrate_pfn) && + cc->direct_compaction && !cc->finish_pageblock && + (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; /* -- cgit v1.2.3 From 9ecc5fc50a9c75b0af252913163a9f1ac4206b17 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:42 +0100 Subject: mm: compaction: only force pageblock scan completion when skip hints are obeyed fast_find_migrateblock relies on skip hints to avoid rescanning a recently selected pageblock but compact_zone() only forces the pageblock scan completion to set the skip hint if in direct compaction. While this prevents direct compaction repeatedly scanning a subset of blocks due to fast_find_migrateblock(), it does not prevent proactive compaction, node compaction and kcompactd encountering the same problem described in commit cfccd2e63e7e ("mm, compaction: finish pageblocks on complete migration failure"). Force the scan completion of a pageblock to set the skip hint if skip hints are obeyed to prevent fast_find_migrateblock() repeatedly selecting a subset of pageblocks. Link: https://lkml.kernel.org/r/20230515113344.6869-3-mgorman@techsingularity.net Signed-off-by: Mel Gorman Suggested-by: Vlastimil Babka Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index d16b0fcd6db5..009128d1e4ef 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2457,7 +2457,8 @@ rescan: } /* * If an ASYNC or SYNC_LIGHT fails to migrate a page - * within the current order-aligned block, scan the + * within the current order-aligned block and + * fast_find_migrateblock may be used then scan the * remainder of the pageblock. This will mark the * pageblock "skip" to avoid rescanning in the near * future. 
This will isolate more pages than necessary @@ -2466,7 +2467,7 @@ rescan: * recently partially scanned. */ if (!pageblock_aligned(cc->migrate_pfn) && - cc->direct_compaction && !cc->finish_pageblock && + !cc->ignore_skip_hint && !cc->finish_pageblock && (cc->mode < MIGRATE_SYNC)) { cc->finish_pageblock = true; -- cgit v1.2.3 From 590ccea80af950685de7f72ec43831765e5c8cb1 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:43 +0100 Subject: mm: compaction: update pageblock skip when first migration candidate is not at the start isolate_migratepages_block should mark a pageblock as skip if scanning started on an aligned pageblock boundary but it only updates the skip flag if the first migration candidate is also aligned. Tracing during a compaction stress load (mmtests: workload-usemem-stress-numa-compact) that many pageblocks are not marked skip causing excessive scanning of blocks that had been recently checked. Update pageblock skip based on "valid_page" which is set if scanning started on a pageblock boundary. [mgorman@techsingularity.net: fix handling of skip bit] Link: https://lkml.kernel.org/r/20230602111622.swtxhn6lu2qwgrwq@techsingularity.net Link: https://lkml.kernel.org/r/20230515113344.6869-4-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 009128d1e4ef..02aa3788765d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -392,18 +392,14 @@ void reset_isolation_suitable(pg_data_t *pgdat) * Sets the pageblock skip bit if it was clear. Note that this is a hint as * locks are not required for read/writers. Returns true if it was already set. */ -static bool test_and_set_skip(struct compact_control *cc, struct page *page, - unsigned long pfn) +static bool test_and_set_skip(struct compact_control *cc, struct page *page) { bool skip; - /* Do no update if skip hint is being ignored */ + /* Do not update if skip hint is being ignored */ if (cc->ignore_skip_hint) return false; - if (!pageblock_aligned(pfn)) - return false; - skip = get_pageblock_skip(page); if (!skip && !cc->no_set_skip_hint) set_pageblock_skip(page); @@ -470,8 +466,7 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) { } -static bool test_and_set_skip(struct compact_control *cc, struct page *page, - unsigned long pfn) +static bool test_and_set_skip(struct compact_control *cc, struct page *page) { return false; } @@ -1074,11 +1069,17 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, lruvec_memcg_debug(lruvec, page_folio(page)); - /* Try get exclusive access under lock */ - if (!skip_updated) { + /* + * Try get exclusive access under lock. If marked for + * skip, the scan is aborted unless the current context + * is a rescan to reach the end of the pageblock. 
+ */ + if (!skip_updated && valid_page) { skip_updated = true; - if (test_and_set_skip(cc, page, low_pfn)) + if (test_and_set_skip(cc, valid_page) && + !cc->finish_pageblock) { goto isolate_abort; + } } /* -- cgit v1.2.3 From 90ed667c03fe553a41d79057740ed5df951eead0 Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Mon, 15 May 2023 12:33:44 +0100 Subject: Revert "Revert "mm/compaction: fix set skip in fast_find_migrateblock"" This reverts commit 95e7a450b819 ("Revert "mm/compaction: fix set skip in fast_find_migrateblock""). Commit 7efc3b726103 ("mm/compaction: fix set skip in fast_find_migrateblock") was reverted due to bug reports about khugepaged consuming large amounts of CPU without making progress. The underlying bug was partially fixed by commit cfccd2e63e7e ("mm, compaction: finish pageblocks on complete migration failure") but it only mitigated the problem and Vlastimil Babka pointing out the same issue could theoretically happen to kcompactd. As pageblocks containing pages that fail to migrate should now be forcibly rescanned to set the skip hint if skip hints are used, fast_find_migrateblock() should no longer loop on a small subset of pageblocks for prolonged periods of time. Revert the revert so fast_find_migrateblock() is effective again. Using the mmtests config workload-usemem-stress-numa-compact, the number of unique ranges scanned was analysed for both kcompactd and !kcompactd activity. 6.4.0-rc1-vanilla kcompactd 7 range=(0x10d600~0x10d800) 7 range=(0x110c00~0x110e00) 7 range=(0x110e00~0x111000) 7 range=(0x111800~0x111a00) 7 range=(0x111a00~0x111c00) !kcompactd 1 range=(0x113e00~0x114000) 1 range=(0x114000~0x114020) 1 range=(0x114400~0x114489) 1 range=(0x114489~0x1144aa) 1 range=(0x1144aa~0x114600) 6.4.0-rc1-mm-revertfastmigrate kcompactd 17 range=(0x104200~0x104400) 17 range=(0x104400~0x104600) 17 range=(0x104600~0x104800) 17 range=(0x104800~0x104a00) 17 range=(0x104a00~0x104c00) !kcompactd 1793 range=(0x15c200~0x15c400) 5436 range=(0x105800~0x105a00) 19826 range=(0x150a00~0x150c00) 19833 range=(0x150800~0x150a00) 19834 range=(0x11ce00~0x11d000) 6.4.0-rc1-mm-follupfastfind kcompactd 22 range=(0x107200~0x107400) 23 range=(0x107400~0x107600) 23 range=(0x107600~0x107800) 23 range=(0x107c00~0x107e00) 23 range=(0x107e00~0x108000) !kcompactd 3 range=(0x890240~0x890400) 5 range=(0x886e00~0x887000) 5 range=(0x88a400~0x88a600) 6 range=(0x88f800~0x88fa00) 9 range=(0x88a400~0x88a420) Note that the vanilla kernel and the full series had some duplication of ranges scanned but it was not severe and would be in line with compaction resets when the skip hints are cleared. Just a revert of commit 7efc3b726103 ("mm/compaction: fix set skip in fast_find_migrateblock") showed excessive rescans of the same ranges so the series should not reintroduce bug 1206848. 
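To make the interaction concrete, the rescan gate assembled by the earlier patches in this series (see the compact_zone() hunks above) can be summarised by the following sketch; the helper name is invented, struct compact_control comes from mm/internal.h, and in the kernel the condition lives inline in compact_zone():

/* Should a failed migration force the rest of this pageblock to be scanned? */
static bool should_finish_pageblock_sketch(const struct compact_control *cc)
{
	return !pageblock_aligned(cc->migrate_pfn) &&	/* block only partially scanned */
	       !cc->ignore_skip_hint &&			/* skip hints are being obeyed */
	       !cc->finish_pageblock &&			/* not already in a rescan */
	       cc->mode < MIGRATE_SYNC;			/* ASYNC or SYNC_LIGHT compaction */
}

When this holds, cc->finish_pageblock is set so the remainder of the block is scanned and its skip hint finally gets set, which is what keeps fast_find_migrateblock() from repeatedly returning the same blocks once the revert below re-enables it.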
Link: https://bugzilla.suse.com/show_bug.cgi?id=1206848 Link: https://lkml.kernel.org/r/20230515113344.6869-5-mgorman@techsingularity.net Signed-off-by: Mel Gorman Tested-by: Raghavendra K T Acked-by: Vlastimil Babka Cc: Chuyi Zhou Cc: Jiri Slaby Cc: Maxim Levitsky Cc: Michal Hocko Cc: Paolo Bonzini Cc: Pedro Falcato Signed-off-by: Andrew Morton --- mm/compaction.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 02aa3788765d..f6465ae74d3f 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1866,7 +1866,6 @@ static unsigned long fast_find_migrateblock(struct compact_control *cc) pfn = cc->zone->zone_start_pfn; cc->fast_search_fail = 0; found_block = true; - set_pageblock_skip(freepage); break; } } -- cgit v1.2.3 From b758fe6df50daf68fef089d8f3c1cd49fc794ed2 Mon Sep 17 00:00:00 2001 From: "Mike Rapoport (IBM)" Date: Mon, 15 May 2023 11:34:00 +0300 Subject: mm/secretmem: make it on by default Following the discussion about direct map fragmentation at LSF/MM [1], it appears that direct map fragmentation has a negligible effect on kernel data accesses. Since the only reason secretmem was disabled by default was concern about a performance regression caused by direct map fragmentation, it makes sense to lift this restriction and enable secretmem by default. secretmem obeys RLIMIT_MEMLOCK and as such is not expected to cause large fragmentation of the direct map or a meaningful increase in page tables allocated during splits of the large mappings in the direct map. The secretmem.enable parameter is retained to allow system administrators to disable secretmem at boot. Switch the default setting of the secretmem.enable parameter to 1. Link: https://lwn.net/Articles/931406/ [1] Link: https://lkml.kernel.org/r/20230515083400.3563974-1-rppt@kernel.org Signed-off-by: Mike Rapoport (IBM) Acked-by: David Hildenbrand Cc: Randy Dunlap Signed-off-by: Andrew Morton --- mm/secretmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/secretmem.c b/mm/secretmem.c index 0b502625cd30..974b32ba8b9d 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -35,7 +35,7 @@ #define SECRETMEM_MODE_MASK (0x0) #define SECRETMEM_FLAGS_MASK SECRETMEM_MODE_MASK -static bool secretmem_enable __ro_after_init; +static bool secretmem_enable __ro_after_init = 1; module_param_named(enable, secretmem_enable, bool, 0400); MODULE_PARM_DESC(secretmem_enable, "Enable secretmem and memfd_secret(2) system call"); -- cgit v1.2.3 From f24f66eef5c03e07120edddde3608477f10b3980 Mon Sep 17 00:00:00 2001 From: Alexey Romanov Date: Tue, 16 May 2023 12:50:29 +0300 Subject: mm/zsmalloc: get rid of PAGE_MASK Use the offset_in_page() macro instead of 'val & ~PAGE_MASK'. Link: https://lkml.kernel.org/r/20230516095029.49036-2-avromanov@sberdevices.ru Signed-off-by: Alexey Romanov Reviewed-by: Sergey Senozhatsky Cc: Minchan Kim Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 02f7f414aade..c0d433541636 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -1341,7 +1341,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, spin_unlock(&pool->lock); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); local_lock(&zs_map_area.lock); area = this_cpu_ptr(&zs_map_area); @@ -1381,7 +1381,7 @@ void zs_unmap_object(struct zs_pool *pool,
unsigned long handle) obj_to_location(obj, &page, &obj_idx); zspage = get_zspage(page); class = zspage_class(pool, zspage); - off = (class->size * obj_idx) & ~PAGE_MASK; + off = offset_in_page(class->size * obj_idx); area = this_cpu_ptr(&zs_map_area); if (off + class->size <= PAGE_SIZE) @@ -1438,7 +1438,7 @@ static unsigned long obj_malloc(struct zs_pool *pool, offset = obj * class->size; nr_page = offset >> PAGE_SHIFT; - m_offset = offset & ~PAGE_MASK; + m_offset = offset_in_page(offset); m_page = get_first_page(zspage); for (i = 0; i < nr_page; i++) @@ -1548,7 +1548,7 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle) void *vaddr; obj_to_location(obj, &f_page, &f_objidx); - f_offset = (class_size * f_objidx) & ~PAGE_MASK; + f_offset = offset_in_page(class_size * f_objidx); zspage = get_zspage(f_page); vaddr = kmap_atomic(f_page); @@ -1640,8 +1640,8 @@ static void zs_object_copy(struct size_class *class, unsigned long dst, obj_to_location(src, &s_page, &s_objidx); obj_to_location(dst, &d_page, &d_objidx); - s_off = (class->size * s_objidx) & ~PAGE_MASK; - d_off = (class->size * d_objidx) & ~PAGE_MASK; + s_off = offset_in_page(class->size * s_objidx); + d_off = offset_in_page(class->size * d_objidx); if (s_off + class->size > PAGE_SIZE) s_size = PAGE_SIZE - s_off; -- cgit v1.2.3 From 072ba380cefc7722c9442cc14a9c2810898c13ac Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:09 +0800 Subject: mm: page_alloc: move mirrored_kernelcore into mm_init.c Patch series "mm: page_alloc: misc cleanup and refactor", v2. This aims to reduce more space in page_alloc.c, also do some cleanup, no functional changes intended. This patch (of 13): Since commit 9420f89db2dd ("mm: move most of core MM initialization to mm/mm_init.c"), mirrored_kernelcore should be moved into mm_init.c, as most related codes are already there. Link: https://lkml.kernel.org/r/20230516063821.121844-1-wangkefeng.wang@huawei.com Link: https://lkml.kernel.org/r/20230516063821.121844-2-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/mm_init.c | 2 ++ mm/page_alloc.c | 3 --- 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 7f7f9c677854..da162b7a044c 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -259,6 +259,8 @@ static int __init cmdline_parse_core(char *p, unsigned long *core, return 0; } +bool mirrored_kernelcore __initdata_memblock; + /* * kernelcore=size sets the amount of memory for use for allocations that * cannot be reclaimed or migrated. 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index af9c995d3c1e..d1086aeca8f2 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -23,7 +23,6 @@ #include #include #include -#include #include #include #include @@ -374,8 +373,6 @@ int user_min_free_kbytes = -1; int watermark_boost_factor __read_mostly = 15000; int watermark_scale_factor = 10; -bool mirrored_kernelcore __initdata_memblock; - /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; EXPORT_SYMBOL(movable_zone); -- cgit v1.2.3 From 5e7d5da2f41c1d762cd1dbdd97758be6c414ea29 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:10 +0800 Subject: mm: page_alloc: move init_on_alloc/free() into mm_init.c Since commit f2fc4b44ec2b ("mm: move init_mem_debugging_and_hardening() to mm/mm_init.c"), the init_on_alloc() and init_on_free() define is better to move there too. Link: https://lkml.kernel.org/r/20230516063821.121844-3-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/mm_init.c | 6 ++++++ mm/page_alloc.c | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index da162b7a044c..15201887f8e0 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2543,6 +2543,12 @@ void __init memblock_free_pages(struct page *page, unsigned long pfn, __free_pages_core(page, order); } +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); +EXPORT_SYMBOL(init_on_alloc); + +DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); +EXPORT_SYMBOL(init_on_free); + static bool _init_on_alloc_enabled_early __read_mostly = IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON); static int __init early_init_on_alloc(char *buf) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d1086aeca8f2..4f094ba7c8fb 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -233,11 +233,6 @@ unsigned long totalcma_pages __read_mostly; int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_ALLOC_DEFAULT_ON, init_on_alloc); -EXPORT_SYMBOL(init_on_alloc); - -DEFINE_STATIC_KEY_MAYBE(CONFIG_INIT_ON_FREE_DEFAULT_ON, init_on_free); -EXPORT_SYMBOL(init_on_free); /* * A cached value of the page's pageblock's migratetype, used when the page is -- cgit v1.2.3 From 904d58578fce531be07619a2bc2cdc16c9fd49b6 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:11 +0800 Subject: mm: page_alloc: move set_zone_contiguous() into mm_init.c set_zone_contiguous() is only used in mm init/hotplug, and clear_zone_contiguous() only used in hotplug, move them from page_alloc.c to the more appropriate file. Link: https://lkml.kernel.org/r/20230516063821.121844-4-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- include/linux/memory_hotplug.h | 3 --- mm/internal.h | 7 +++++++ mm/mm_init.c | 22 ++++++++++++++++++++++ mm/page_alloc.c | 27 --------------------------- 4 files changed, 29 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 9fcbf5706595..04bc286eed42 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -326,9 +326,6 @@ static inline int remove_memory(u64 start, u64 size) static inline void __remove_memory(u64 start, u64 size) {} #endif /* CONFIG_MEMORY_HOTREMOVE */ -extern void set_zone_contiguous(struct zone *zone); -extern void clear_zone_contiguous(struct zone *zone); - #ifdef CONFIG_MEMORY_HOTPLUG extern void __ref free_area_init_core_hotplug(struct pglist_data *pgdat); extern int __add_memory(int nid, u64 start, u64 size, mhp_t mhp_flags); diff --git a/mm/internal.h b/mm/internal.h index 68410c6d97ac..c99da2cfac71 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -371,6 +371,13 @@ static inline struct page *pageblock_pfn_to_page(unsigned long start_pfn, return __pageblock_pfn_to_page(start_pfn, end_pfn, zone); } +void set_zone_contiguous(struct zone *zone); + +static inline void clear_zone_contiguous(struct zone *zone) +{ + zone->contiguous = false; +} + extern int __isolate_free_page(struct page *page, unsigned int order); extern void __putback_isolated_page(struct page *page, unsigned int order, int mt); diff --git a/mm/mm_init.c b/mm/mm_init.c index 15201887f8e0..0fd4ddfdfb2e 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2330,6 +2330,28 @@ void __init init_cma_reserved_pageblock(struct page *page) } #endif +void set_zone_contiguous(struct zone *zone) +{ + unsigned long block_start_pfn = zone->zone_start_pfn; + unsigned long block_end_pfn; + + block_end_pfn = pageblock_end_pfn(block_start_pfn); + for (; block_start_pfn < zone_end_pfn(zone); + block_start_pfn = block_end_pfn, + block_end_pfn += pageblock_nr_pages) { + + block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); + + if (!__pageblock_pfn_to_page(block_start_pfn, + block_end_pfn, zone)) + return; + cond_resched(); + } + + /* We confirm that there is no hole */ + zone->contiguous = true; +} + void __init page_alloc_init_late(void) { struct zone *zone; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 4f094ba7c8fb..7bb0d6abfe3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1532,33 +1532,6 @@ struct page *__pageblock_pfn_to_page(unsigned long start_pfn, return start_page; } -void set_zone_contiguous(struct zone *zone) -{ - unsigned long block_start_pfn = zone->zone_start_pfn; - unsigned long block_end_pfn; - - block_end_pfn = pageblock_end_pfn(block_start_pfn); - for (; block_start_pfn < zone_end_pfn(zone); - block_start_pfn = block_end_pfn, - block_end_pfn += pageblock_nr_pages) { - - block_end_pfn = min(block_end_pfn, zone_end_pfn(zone)); - - if (!__pageblock_pfn_to_page(block_start_pfn, - block_end_pfn, zone)) - return; - cond_resched(); - } - - /* We confirm that there is no hole */ - zone->contiguous = true; -} - -void clear_zone_contiguous(struct zone *zone) -{ - zone->contiguous = false; -} - /* * The order of subdivision here is critical for the IO subsystem. 
* Please do not alter this order without good reasons and regression -- cgit v1.2.3 From e9aae1709264b2353d67d4846bd617c445a49fe0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:12 +0800 Subject: mm: page_alloc: collect mem statistic into show_mem.c Let's move show_mem.c from lib to mm, as it belongs memory subsystem, also split some memory statistic related functions from page_alloc.c to show_mem.c, and we cleanup some unneeded include. There is no functional change. Link: https://lkml.kernel.org/r/20230516063821.121844-5-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- lib/Makefile | 2 +- lib/show_mem.c | 37 ----- mm/Makefile | 2 +- mm/page_alloc.c | 402 ---------------------------------------------------- mm/show_mem.c | 429 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 5 files changed, 431 insertions(+), 441 deletions(-) delete mode 100644 lib/show_mem.c create mode 100644 mm/show_mem.c (limited to 'mm') diff --git a/lib/Makefile b/lib/Makefile index 876fcdeae34e..38f23f352736 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,7 +30,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \ - flex_proportions.o ratelimit.o show_mem.o \ + flex_proportions.o ratelimit.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ nmi_backtrace.o win_minmax.o memcat_p.o \ diff --git a/lib/show_mem.c b/lib/show_mem.c deleted file mode 100644 index 1485c87be935..000000000000 --- a/lib/show_mem.c +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic show_mem() implementation - * - * Copyright (C) 2008 Johannes Weiner - */ - -#include -#include - -void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) -{ - unsigned long total = 0, reserved = 0, highmem = 0; - struct zone *zone; - - printk("Mem-Info:\n"); - __show_free_areas(filter, nodemask, max_zone_idx); - - for_each_populated_zone(zone) { - - total += zone->present_pages; - reserved += zone->present_pages - zone_managed_pages(zone); - - if (is_highmem(zone)) - highmem += zone->present_pages; - } - - printk("%lu pages RAM\n", total); - printk("%lu pages HighMem/MovableOnly\n", highmem); - printk("%lu pages reserved\n", reserved); -#ifdef CONFIG_CMA - printk("%lu pages cma reserved\n", totalcma_pages); -#endif -#ifdef CONFIG_MEMORY_FAILURE - printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); -#endif -} diff --git a/mm/Makefile b/mm/Makefile index e29afc890cde..5262ce5baa28 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -51,7 +51,7 @@ obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o percpu.o slab_common.o \ - compaction.o \ + compaction.o show_mem.o\ interval_tree.o list_lru.o workingset.o \ debug.o gup.o mmap_lock.o $(mmu-y) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7bb0d6abfe3d..34a9fe934891 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -18,10 +18,7 @@ #include #include #include -#include -#include #include -#include #include #include #include @@ -30,8 +27,6 @@ #include #include #include -#include -#include #include #include #include @@ 
-40,19 +35,10 @@ #include #include #include -#include #include -#include -#include -#include -#include #include #include -#include #include -#include -#include -#include #include #include #include @@ -60,12 +46,9 @@ #include #include #include -#include -#include #include #include #include -#include #include #include #include @@ -73,13 +56,10 @@ #include #include #include -#include -#include #include #include "internal.h" #include "shuffle.h" #include "page_reporting.h" -#include "swap.h" /* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */ typedef int __bitwise fpi_t; @@ -226,11 +206,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { }; EXPORT_SYMBOL(node_states); -atomic_long_t _totalram_pages __read_mostly; -EXPORT_SYMBOL(_totalram_pages); -unsigned long totalreserve_pages __read_mostly; -unsigned long totalcma_pages __read_mostly; - int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; @@ -5102,383 +5077,6 @@ unsigned long nr_free_buffer_pages(void) } EXPORT_SYMBOL_GPL(nr_free_buffer_pages); -static inline void show_node(struct zone *zone) -{ - if (IS_ENABLED(CONFIG_NUMA)) - printk("Node %d ", zone_to_nid(zone)); -} - -long si_mem_available(void) -{ - long available; - unsigned long pagecache; - unsigned long wmark_low = 0; - unsigned long pages[NR_LRU_LISTS]; - unsigned long reclaimable; - struct zone *zone; - int lru; - - for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) - pages[lru] = global_node_page_state(NR_LRU_BASE + lru); - - for_each_zone(zone) - wmark_low += low_wmark_pages(zone); - - /* - * Estimate the amount of memory available for userspace allocations, - * without causing swapping or OOM. - */ - available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; - - /* - * Not all the page cache can be freed, otherwise the system will - * start swapping or thrashing. Assume at least half of the page - * cache, or the low watermark worth of cache, needs to stay. - */ - pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; - pagecache -= min(pagecache / 2, wmark_low); - available += pagecache; - - /* - * Part of the reclaimable slab and other kernel memory consists of - * items that are in use, and cannot be freed. Cap this estimate at the - * low watermark. 
- */ - reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); - available += reclaimable - min(reclaimable / 2, wmark_low); - - if (available < 0) - available = 0; - return available; -} -EXPORT_SYMBOL_GPL(si_mem_available); - -void si_meminfo(struct sysinfo *val) -{ - val->totalram = totalram_pages(); - val->sharedram = global_node_page_state(NR_SHMEM); - val->freeram = global_zone_page_state(NR_FREE_PAGES); - val->bufferram = nr_blockdev_pages(); - val->totalhigh = totalhigh_pages(); - val->freehigh = nr_free_highpages(); - val->mem_unit = PAGE_SIZE; -} - -EXPORT_SYMBOL(si_meminfo); - -#ifdef CONFIG_NUMA -void si_meminfo_node(struct sysinfo *val, int nid) -{ - int zone_type; /* needs to be signed */ - unsigned long managed_pages = 0; - unsigned long managed_highpages = 0; - unsigned long free_highpages = 0; - pg_data_t *pgdat = NODE_DATA(nid); - - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) - managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); - val->totalram = managed_pages; - val->sharedram = node_page_state(pgdat, NR_SHMEM); - val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); -#ifdef CONFIG_HIGHMEM - for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { - struct zone *zone = &pgdat->node_zones[zone_type]; - - if (is_highmem(zone)) { - managed_highpages += zone_managed_pages(zone); - free_highpages += zone_page_state(zone, NR_FREE_PAGES); - } - } - val->totalhigh = managed_highpages; - val->freehigh = free_highpages; -#else - val->totalhigh = managed_highpages; - val->freehigh = free_highpages; -#endif - val->mem_unit = PAGE_SIZE; -} -#endif - -/* - * Determine whether the node should be displayed or not, depending on whether - * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). - */ -static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) -{ - if (!(flags & SHOW_MEM_FILTER_NODES)) - return false; - - /* - * no node mask - aka implicit memory numa policy. Do not bother with - * the synchronization - read_mems_allowed_begin - because we do not - * have to be precise here. - */ - if (!nodemask) - nodemask = &cpuset_current_mems_allowed; - - return !node_isset(nid, *nodemask); -} - -static void show_migration_types(unsigned char type) -{ - static const char types[MIGRATE_TYPES] = { - [MIGRATE_UNMOVABLE] = 'U', - [MIGRATE_MOVABLE] = 'M', - [MIGRATE_RECLAIMABLE] = 'E', - [MIGRATE_HIGHATOMIC] = 'H', -#ifdef CONFIG_CMA - [MIGRATE_CMA] = 'C', -#endif -#ifdef CONFIG_MEMORY_ISOLATION - [MIGRATE_ISOLATE] = 'I', -#endif - }; - char tmp[MIGRATE_TYPES + 1]; - char *p = tmp; - int i; - - for (i = 0; i < MIGRATE_TYPES; i++) { - if (type & (1 << i)) - *p++ = types[i]; - } - - *p = '\0'; - printk(KERN_CONT "(%s) ", tmp); -} - -static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) -{ - int zone_idx; - for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) - if (zone_managed_pages(pgdat->node_zones + zone_idx)) - return true; - return false; -} - -/* - * Show free area list (used inside shift_scroll-lock stuff) - * We also calculate the percentage fragmentation. We do this by counting the - * memory on each free list with the exception of the first item on the list. - * - * Bits in @filter: - * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's - * cpuset. 
- */ -void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) -{ - unsigned long free_pcp = 0; - int cpu, nid; - struct zone *zone; - pg_data_t *pgdat; - - for_each_populated_zone(zone) { - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - - for_each_online_cpu(cpu) - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; - } - - printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" - " active_file:%lu inactive_file:%lu isolated_file:%lu\n" - " unevictable:%lu dirty:%lu writeback:%lu\n" - " slab_reclaimable:%lu slab_unreclaimable:%lu\n" - " mapped:%lu shmem:%lu pagetables:%lu\n" - " sec_pagetables:%lu bounce:%lu\n" - " kernel_misc_reclaimable:%lu\n" - " free:%lu free_pcp:%lu free_cma:%lu\n", - global_node_page_state(NR_ACTIVE_ANON), - global_node_page_state(NR_INACTIVE_ANON), - global_node_page_state(NR_ISOLATED_ANON), - global_node_page_state(NR_ACTIVE_FILE), - global_node_page_state(NR_INACTIVE_FILE), - global_node_page_state(NR_ISOLATED_FILE), - global_node_page_state(NR_UNEVICTABLE), - global_node_page_state(NR_FILE_DIRTY), - global_node_page_state(NR_WRITEBACK), - global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), - global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), - global_node_page_state(NR_FILE_MAPPED), - global_node_page_state(NR_SHMEM), - global_node_page_state(NR_PAGETABLE), - global_node_page_state(NR_SECONDARY_PAGETABLE), - global_zone_page_state(NR_BOUNCE), - global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), - global_zone_page_state(NR_FREE_PAGES), - free_pcp, - global_zone_page_state(NR_FREE_CMA_PAGES)); - - for_each_online_pgdat(pgdat) { - if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) - continue; - if (!node_has_managed_zones(pgdat, max_zone_idx)) - continue; - - printk("Node %d" - " active_anon:%lukB" - " inactive_anon:%lukB" - " active_file:%lukB" - " inactive_file:%lukB" - " unevictable:%lukB" - " isolated(anon):%lukB" - " isolated(file):%lukB" - " mapped:%lukB" - " dirty:%lukB" - " writeback:%lukB" - " shmem:%lukB" -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - " shmem_thp: %lukB" - " shmem_pmdmapped: %lukB" - " anon_thp: %lukB" -#endif - " writeback_tmp:%lukB" - " kernel_stack:%lukB" -#ifdef CONFIG_SHADOW_CALL_STACK - " shadow_call_stack:%lukB" -#endif - " pagetables:%lukB" - " sec_pagetables:%lukB" - " all_unreclaimable? %s" - "\n", - pgdat->node_id, - K(node_page_state(pgdat, NR_ACTIVE_ANON)), - K(node_page_state(pgdat, NR_INACTIVE_ANON)), - K(node_page_state(pgdat, NR_ACTIVE_FILE)), - K(node_page_state(pgdat, NR_INACTIVE_FILE)), - K(node_page_state(pgdat, NR_UNEVICTABLE)), - K(node_page_state(pgdat, NR_ISOLATED_ANON)), - K(node_page_state(pgdat, NR_ISOLATED_FILE)), - K(node_page_state(pgdat, NR_FILE_MAPPED)), - K(node_page_state(pgdat, NR_FILE_DIRTY)), - K(node_page_state(pgdat, NR_WRITEBACK)), - K(node_page_state(pgdat, NR_SHMEM)), -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - K(node_page_state(pgdat, NR_SHMEM_THPS)), - K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), - K(node_page_state(pgdat, NR_ANON_THPS)), -#endif - K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), - node_page_state(pgdat, NR_KERNEL_STACK_KB), -#ifdef CONFIG_SHADOW_CALL_STACK - node_page_state(pgdat, NR_KERNEL_SCS_KB), -#endif - K(node_page_state(pgdat, NR_PAGETABLE)), - K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), - pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? 
- "yes" : "no"); - } - - for_each_populated_zone(zone) { - int i; - - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - - free_pcp = 0; - for_each_online_cpu(cpu) - free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; - - show_node(zone); - printk(KERN_CONT - "%s" - " free:%lukB" - " boost:%lukB" - " min:%lukB" - " low:%lukB" - " high:%lukB" - " reserved_highatomic:%luKB" - " active_anon:%lukB" - " inactive_anon:%lukB" - " active_file:%lukB" - " inactive_file:%lukB" - " unevictable:%lukB" - " writepending:%lukB" - " present:%lukB" - " managed:%lukB" - " mlocked:%lukB" - " bounce:%lukB" - " free_pcp:%lukB" - " local_pcp:%ukB" - " free_cma:%lukB" - "\n", - zone->name, - K(zone_page_state(zone, NR_FREE_PAGES)), - K(zone->watermark_boost), - K(min_wmark_pages(zone)), - K(low_wmark_pages(zone)), - K(high_wmark_pages(zone)), - K(zone->nr_reserved_highatomic), - K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), - K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), - K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), - K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), - K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), - K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), - K(zone->present_pages), - K(zone_managed_pages(zone)), - K(zone_page_state(zone, NR_MLOCK)), - K(zone_page_state(zone, NR_BOUNCE)), - K(free_pcp), - K(this_cpu_read(zone->per_cpu_pageset->count)), - K(zone_page_state(zone, NR_FREE_CMA_PAGES))); - printk("lowmem_reserve[]:"); - for (i = 0; i < MAX_NR_ZONES; i++) - printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); - printk(KERN_CONT "\n"); - } - - for_each_populated_zone(zone) { - unsigned int order; - unsigned long nr[MAX_ORDER + 1], flags, total = 0; - unsigned char types[MAX_ORDER + 1]; - - if (zone_idx(zone) > max_zone_idx) - continue; - if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) - continue; - show_node(zone); - printk(KERN_CONT "%s: ", zone->name); - - spin_lock_irqsave(&zone->lock, flags); - for (order = 0; order <= MAX_ORDER; order++) { - struct free_area *area = &zone->free_area[order]; - int type; - - nr[order] = area->nr_free; - total += nr[order] << order; - - types[order] = 0; - for (type = 0; type < MIGRATE_TYPES; type++) { - if (!free_area_empty(area, type)) - types[order] |= 1 << type; - } - } - spin_unlock_irqrestore(&zone->lock, flags); - for (order = 0; order <= MAX_ORDER; order++) { - printk(KERN_CONT "%lu*%lukB ", - nr[order], K(1UL) << order); - if (nr[order]) - show_migration_types(types[order]); - } - printk(KERN_CONT "= %lukB\n", K(total)); - } - - for_each_online_node(nid) { - if (show_mem_node_skip(filter, nid, nodemask)) - continue; - hugetlb_show_meminfo_node(nid); - } - - printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); - - show_swap_cache_info(); -} - static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref) { zoneref->zone = zone; diff --git a/mm/show_mem.c b/mm/show_mem.c new file mode 100644 index 000000000000..01f8e9905817 --- /dev/null +++ b/mm/show_mem.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Generic show_mem() implementation + * + * Copyright (C) 2008 Johannes Weiner + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "internal.h" +#include "swap.h" + +atomic_long_t _totalram_pages __read_mostly; +EXPORT_SYMBOL(_totalram_pages); +unsigned long totalreserve_pages __read_mostly; +unsigned long totalcma_pages __read_mostly; + +static 
inline void show_node(struct zone *zone) +{ + if (IS_ENABLED(CONFIG_NUMA)) + printk("Node %d ", zone_to_nid(zone)); +} + +long si_mem_available(void) +{ + long available; + unsigned long pagecache; + unsigned long wmark_low = 0; + unsigned long pages[NR_LRU_LISTS]; + unsigned long reclaimable; + struct zone *zone; + int lru; + + for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++) + pages[lru] = global_node_page_state(NR_LRU_BASE + lru); + + for_each_zone(zone) + wmark_low += low_wmark_pages(zone); + + /* + * Estimate the amount of memory available for userspace allocations, + * without causing swapping or OOM. + */ + available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages; + + /* + * Not all the page cache can be freed, otherwise the system will + * start swapping or thrashing. Assume at least half of the page + * cache, or the low watermark worth of cache, needs to stay. + */ + pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE]; + pagecache -= min(pagecache / 2, wmark_low); + available += pagecache; + + /* + * Part of the reclaimable slab and other kernel memory consists of + * items that are in use, and cannot be freed. Cap this estimate at the + * low watermark. + */ + reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) + + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE); + available += reclaimable - min(reclaimable / 2, wmark_low); + + if (available < 0) + available = 0; + return available; +} +EXPORT_SYMBOL_GPL(si_mem_available); + +void si_meminfo(struct sysinfo *val) +{ + val->totalram = totalram_pages(); + val->sharedram = global_node_page_state(NR_SHMEM); + val->freeram = global_zone_page_state(NR_FREE_PAGES); + val->bufferram = nr_blockdev_pages(); + val->totalhigh = totalhigh_pages(); + val->freehigh = nr_free_highpages(); + val->mem_unit = PAGE_SIZE; +} + +EXPORT_SYMBOL(si_meminfo); + +#ifdef CONFIG_NUMA +void si_meminfo_node(struct sysinfo *val, int nid) +{ + int zone_type; /* needs to be signed */ + unsigned long managed_pages = 0; + unsigned long managed_highpages = 0; + unsigned long free_highpages = 0; + pg_data_t *pgdat = NODE_DATA(nid); + + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) + managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]); + val->totalram = managed_pages; + val->sharedram = node_page_state(pgdat, NR_SHMEM); + val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES); +#ifdef CONFIG_HIGHMEM + for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) { + struct zone *zone = &pgdat->node_zones[zone_type]; + + if (is_highmem(zone)) { + managed_highpages += zone_managed_pages(zone); + free_highpages += zone_page_state(zone, NR_FREE_PAGES); + } + } + val->totalhigh = managed_highpages; + val->freehigh = free_highpages; +#else + val->totalhigh = managed_highpages; + val->freehigh = free_highpages; +#endif + val->mem_unit = PAGE_SIZE; +} +#endif + +/* + * Determine whether the node should be displayed or not, depending on whether + * SHOW_MEM_FILTER_NODES was passed to show_free_areas(). + */ +static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask) +{ + if (!(flags & SHOW_MEM_FILTER_NODES)) + return false; + + /* + * no node mask - aka implicit memory numa policy. Do not bother with + * the synchronization - read_mems_allowed_begin - because we do not + * have to be precise here. 
+ */ + if (!nodemask) + nodemask = &cpuset_current_mems_allowed; + + return !node_isset(nid, *nodemask); +} + +static void show_migration_types(unsigned char type) +{ + static const char types[MIGRATE_TYPES] = { + [MIGRATE_UNMOVABLE] = 'U', + [MIGRATE_MOVABLE] = 'M', + [MIGRATE_RECLAIMABLE] = 'E', + [MIGRATE_HIGHATOMIC] = 'H', +#ifdef CONFIG_CMA + [MIGRATE_CMA] = 'C', +#endif +#ifdef CONFIG_MEMORY_ISOLATION + [MIGRATE_ISOLATE] = 'I', +#endif + }; + char tmp[MIGRATE_TYPES + 1]; + char *p = tmp; + int i; + + for (i = 0; i < MIGRATE_TYPES; i++) { + if (type & (1 << i)) + *p++ = types[i]; + } + + *p = '\0'; + printk(KERN_CONT "(%s) ", tmp); +} + +static bool node_has_managed_zones(pg_data_t *pgdat, int max_zone_idx) +{ + int zone_idx; + for (zone_idx = 0; zone_idx <= max_zone_idx; zone_idx++) + if (zone_managed_pages(pgdat->node_zones + zone_idx)) + return true; + return false; +} + +/* + * Show free area list (used inside shift_scroll-lock stuff) + * We also calculate the percentage fragmentation. We do this by counting the + * memory on each free list with the exception of the first item on the list. + * + * Bits in @filter: + * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's + * cpuset. + */ +void __show_free_areas(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) +{ + unsigned long free_pcp = 0; + int cpu, nid; + struct zone *zone; + pg_data_t *pgdat; + + for_each_populated_zone(zone) { + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + + for_each_online_cpu(cpu) + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; + } + + printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n" + " active_file:%lu inactive_file:%lu isolated_file:%lu\n" + " unevictable:%lu dirty:%lu writeback:%lu\n" + " slab_reclaimable:%lu slab_unreclaimable:%lu\n" + " mapped:%lu shmem:%lu pagetables:%lu\n" + " sec_pagetables:%lu bounce:%lu\n" + " kernel_misc_reclaimable:%lu\n" + " free:%lu free_pcp:%lu free_cma:%lu\n", + global_node_page_state(NR_ACTIVE_ANON), + global_node_page_state(NR_INACTIVE_ANON), + global_node_page_state(NR_ISOLATED_ANON), + global_node_page_state(NR_ACTIVE_FILE), + global_node_page_state(NR_INACTIVE_FILE), + global_node_page_state(NR_ISOLATED_FILE), + global_node_page_state(NR_UNEVICTABLE), + global_node_page_state(NR_FILE_DIRTY), + global_node_page_state(NR_WRITEBACK), + global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B), + global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B), + global_node_page_state(NR_FILE_MAPPED), + global_node_page_state(NR_SHMEM), + global_node_page_state(NR_PAGETABLE), + global_node_page_state(NR_SECONDARY_PAGETABLE), + global_zone_page_state(NR_BOUNCE), + global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE), + global_zone_page_state(NR_FREE_PAGES), + free_pcp, + global_zone_page_state(NR_FREE_CMA_PAGES)); + + for_each_online_pgdat(pgdat) { + if (show_mem_node_skip(filter, pgdat->node_id, nodemask)) + continue; + if (!node_has_managed_zones(pgdat, max_zone_idx)) + continue; + + printk("Node %d" + " active_anon:%lukB" + " inactive_anon:%lukB" + " active_file:%lukB" + " inactive_file:%lukB" + " unevictable:%lukB" + " isolated(anon):%lukB" + " isolated(file):%lukB" + " mapped:%lukB" + " dirty:%lukB" + " writeback:%lukB" + " shmem:%lukB" +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + " shmem_thp: %lukB" + " shmem_pmdmapped: %lukB" + " anon_thp: %lukB" +#endif + " writeback_tmp:%lukB" + " kernel_stack:%lukB" +#ifdef CONFIG_SHADOW_CALL_STACK + " 
shadow_call_stack:%lukB" +#endif + " pagetables:%lukB" + " sec_pagetables:%lukB" + " all_unreclaimable? %s" + "\n", + pgdat->node_id, + K(node_page_state(pgdat, NR_ACTIVE_ANON)), + K(node_page_state(pgdat, NR_INACTIVE_ANON)), + K(node_page_state(pgdat, NR_ACTIVE_FILE)), + K(node_page_state(pgdat, NR_INACTIVE_FILE)), + K(node_page_state(pgdat, NR_UNEVICTABLE)), + K(node_page_state(pgdat, NR_ISOLATED_ANON)), + K(node_page_state(pgdat, NR_ISOLATED_FILE)), + K(node_page_state(pgdat, NR_FILE_MAPPED)), + K(node_page_state(pgdat, NR_FILE_DIRTY)), + K(node_page_state(pgdat, NR_WRITEBACK)), + K(node_page_state(pgdat, NR_SHMEM)), +#ifdef CONFIG_TRANSPARENT_HUGEPAGE + K(node_page_state(pgdat, NR_SHMEM_THPS)), + K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)), + K(node_page_state(pgdat, NR_ANON_THPS)), +#endif + K(node_page_state(pgdat, NR_WRITEBACK_TEMP)), + node_page_state(pgdat, NR_KERNEL_STACK_KB), +#ifdef CONFIG_SHADOW_CALL_STACK + node_page_state(pgdat, NR_KERNEL_SCS_KB), +#endif + K(node_page_state(pgdat, NR_PAGETABLE)), + K(node_page_state(pgdat, NR_SECONDARY_PAGETABLE)), + pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ? + "yes" : "no"); + } + + for_each_populated_zone(zone) { + int i; + + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + + free_pcp = 0; + for_each_online_cpu(cpu) + free_pcp += per_cpu_ptr(zone->per_cpu_pageset, cpu)->count; + + show_node(zone); + printk(KERN_CONT + "%s" + " free:%lukB" + " boost:%lukB" + " min:%lukB" + " low:%lukB" + " high:%lukB" + " reserved_highatomic:%luKB" + " active_anon:%lukB" + " inactive_anon:%lukB" + " active_file:%lukB" + " inactive_file:%lukB" + " unevictable:%lukB" + " writepending:%lukB" + " present:%lukB" + " managed:%lukB" + " mlocked:%lukB" + " bounce:%lukB" + " free_pcp:%lukB" + " local_pcp:%ukB" + " free_cma:%lukB" + "\n", + zone->name, + K(zone_page_state(zone, NR_FREE_PAGES)), + K(zone->watermark_boost), + K(min_wmark_pages(zone)), + K(low_wmark_pages(zone)), + K(high_wmark_pages(zone)), + K(zone->nr_reserved_highatomic), + K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)), + K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)), + K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)), + K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)), + K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)), + K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)), + K(zone->present_pages), + K(zone_managed_pages(zone)), + K(zone_page_state(zone, NR_MLOCK)), + K(zone_page_state(zone, NR_BOUNCE)), + K(free_pcp), + K(this_cpu_read(zone->per_cpu_pageset->count)), + K(zone_page_state(zone, NR_FREE_CMA_PAGES))); + printk("lowmem_reserve[]:"); + for (i = 0; i < MAX_NR_ZONES; i++) + printk(KERN_CONT " %ld", zone->lowmem_reserve[i]); + printk(KERN_CONT "\n"); + } + + for_each_populated_zone(zone) { + unsigned int order; + unsigned long nr[MAX_ORDER + 1], flags, total = 0; + unsigned char types[MAX_ORDER + 1]; + + if (zone_idx(zone) > max_zone_idx) + continue; + if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask)) + continue; + show_node(zone); + printk(KERN_CONT "%s: ", zone->name); + + spin_lock_irqsave(&zone->lock, flags); + for (order = 0; order <= MAX_ORDER; order++) { + struct free_area *area = &zone->free_area[order]; + int type; + + nr[order] = area->nr_free; + total += nr[order] << order; + + types[order] = 0; + for (type = 0; type < MIGRATE_TYPES; type++) { + if (!free_area_empty(area, type)) + types[order] |= 1 << type; + } + } + spin_unlock_irqrestore(&zone->lock, flags); + for (order = 0; order <= 
MAX_ORDER; order++) { + printk(KERN_CONT "%lu*%lukB ", + nr[order], K(1UL) << order); + if (nr[order]) + show_migration_types(types[order]); + } + printk(KERN_CONT "= %lukB\n", K(total)); + } + + for_each_online_node(nid) { + if (show_mem_node_skip(filter, nid, nodemask)) + continue; + hugetlb_show_meminfo_node(nid); + } + + printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES)); + + show_swap_cache_info(); +} + +void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) +{ + unsigned long total = 0, reserved = 0, highmem = 0; + struct zone *zone; + + printk("Mem-Info:\n"); + __show_free_areas(filter, nodemask, max_zone_idx); + + for_each_populated_zone(zone) { + + total += zone->present_pages; + reserved += zone->present_pages - zone_managed_pages(zone); + + if (is_highmem(zone)) + highmem += zone->present_pages; + } + + printk("%lu pages RAM\n", total); + printk("%lu pages HighMem/MovableOnly\n", highmem); + printk("%lu pages reserved\n", reserved); +#ifdef CONFIG_CMA + printk("%lu pages cma reserved\n", totalcma_pages); +#endif +#ifdef CONFIG_MEMORY_FAILURE + printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); +#endif +} -- cgit v1.2.3 From 5b855aa37cf742960d4f8a6c5c8e16092d0463be Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:13 +0800 Subject: mm: page_alloc: squash page_is_consistent() Squash the page_is_consistent() into bad_range() as there is only one caller. Link: https://lkml.kernel.org/r/20230516063821.121844-6-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- mm/page_alloc.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 34a9fe934891..ce0d81686de3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -517,13 +517,6 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page) return ret; } -static int page_is_consistent(struct zone *zone, struct page *page) -{ - if (zone != page_zone(page)) - return 0; - - return 1; -} /* * Temporary debugging check for pages not lying within a given zone. */ @@ -531,7 +524,7 @@ static int __maybe_unused bad_range(struct zone *zone, struct page *page) { if (page_outside_zone_boundaries(zone, page)) return 1; - if (!page_is_consistent(zone, page)) + if (zone != page_zone(page)) return 1; return 0; -- cgit v1.2.3 From e9f2b529e10f9ca8d25ac83e574e027d504de879 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:14 +0800 Subject: mm: page_alloc: remove alloc_contig_dump_pages() stub DEFINE_DYNAMIC_DEBUG_METADATA and DYNAMIC_DEBUG_BRANCH already has stub definitions without dynamic debug feature, remove unnecessary alloc_contig_dump_pages() stub. Link: https://lkml.kernel.org/r/20230516063821.121844-7-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/page_alloc.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ce0d81686de3..37ac82083229 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6213,8 +6213,6 @@ out: } #ifdef CONFIG_CONTIG_ALLOC -#if defined(CONFIG_DYNAMIC_DEBUG) || \ - (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) /* Usage: See admin-guide/dynamic-debug-howto.rst */ static void alloc_contig_dump_pages(struct list_head *page_list) { @@ -6228,11 +6226,6 @@ static void alloc_contig_dump_pages(struct list_head *page_list) dump_page(page, "migration failure"); } } -#else -static inline void alloc_contig_dump_pages(struct list_head *page_list) -{ -} -#endif /* [start, end) must belong to a single zone. */ int __alloc_contig_migrate_range(struct compact_control *cc, -- cgit v1.2.3 From 0866e82e40fba45dae07e6e8385929b574201752 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:15 +0800 Subject: mm: page_alloc: split out FAIL_PAGE_ALLOC ... to a single file to reduce a bit of page_alloc.c. Link: https://lkml.kernel.org/r/20230516063821.121844-8-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/fault-inject.h | 9 ++++++ mm/Makefile | 1 + mm/fail_page_alloc.c | 66 +++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 74 -------------------------------------------- 4 files changed, 76 insertions(+), 74 deletions(-) create mode 100644 mm/fail_page_alloc.c (limited to 'mm') diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 481abf530b3c..6d5edef09d45 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -93,6 +93,15 @@ struct kmem_cache; bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); +#ifdef CONFIG_FAIL_PAGE_ALLOC +bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); +#else +static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) +{ + return false; +} +#endif /* CONFIG_FAIL_PAGE_ALLOC */ + int should_failslab(struct kmem_cache *s, gfp_t gfpflags); #ifdef CONFIG_FAILSLAB extern bool __should_failslab(struct kmem_cache *s, gfp_t gfpflags); diff --git a/mm/Makefile b/mm/Makefile index 5262ce5baa28..0eec4bc72d3f 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -89,6 +89,7 @@ obj-$(CONFIG_KASAN) += kasan/ obj-$(CONFIG_KFENCE) += kfence/ obj-$(CONFIG_KMSAN) += kmsan/ obj-$(CONFIG_FAILSLAB) += failslab.o +obj-$(CONFIG_FAIL_PAGE_ALLOC) += fail_page_alloc.o obj-$(CONFIG_MEMTEST) += memtest.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_NUMA) += memory-tiers.o diff --git a/mm/fail_page_alloc.c b/mm/fail_page_alloc.c new file mode 100644 index 000000000000..b1b09cce9394 --- /dev/null +++ b/mm/fail_page_alloc.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +static struct { + struct fault_attr attr; + + bool ignore_gfp_highmem; + bool ignore_gfp_reclaim; + u32 min_order; +} fail_page_alloc = { + .attr = FAULT_ATTR_INITIALIZER, + .ignore_gfp_reclaim = true, + .ignore_gfp_highmem = true, + .min_order = 1, +}; + +static int __init setup_fail_page_alloc(char *str) +{ + return setup_fault_attr(&fail_page_alloc.attr, str); +} +__setup("fail_page_alloc=", setup_fail_page_alloc); + +bool __should_fail_alloc_page(gfp_t 
gfp_mask, unsigned int order) +{ + int flags = 0; + + if (order < fail_page_alloc.min_order) + return false; + if (gfp_mask & __GFP_NOFAIL) + return false; + if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) + return false; + if (fail_page_alloc.ignore_gfp_reclaim && + (gfp_mask & __GFP_DIRECT_RECLAIM)) + return false; + + /* See comment in __should_failslab() */ + if (gfp_mask & __GFP_NOWARN) + flags |= FAULT_NOWARN; + + return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); +} + +#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS + +static int __init fail_page_alloc_debugfs(void) +{ + umode_t mode = S_IFREG | 0600; + struct dentry *dir; + + dir = fault_create_debugfs_attr("fail_page_alloc", NULL, + &fail_page_alloc.attr); + + debugfs_create_bool("ignore-gfp-wait", mode, dir, + &fail_page_alloc.ignore_gfp_reclaim); + debugfs_create_bool("ignore-gfp-highmem", mode, dir, + &fail_page_alloc.ignore_gfp_highmem); + debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); + + return 0; +} + +late_initcall(fail_page_alloc_debugfs); + +#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 37ac82083229..2b8e4a086c3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2994,80 +2994,6 @@ out: return page; } -#ifdef CONFIG_FAIL_PAGE_ALLOC - -static struct { - struct fault_attr attr; - - bool ignore_gfp_highmem; - bool ignore_gfp_reclaim; - u32 min_order; -} fail_page_alloc = { - .attr = FAULT_ATTR_INITIALIZER, - .ignore_gfp_reclaim = true, - .ignore_gfp_highmem = true, - .min_order = 1, -}; - -static int __init setup_fail_page_alloc(char *str) -{ - return setup_fault_attr(&fail_page_alloc.attr, str); -} -__setup("fail_page_alloc=", setup_fail_page_alloc); - -static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) -{ - int flags = 0; - - if (order < fail_page_alloc.min_order) - return false; - if (gfp_mask & __GFP_NOFAIL) - return false; - if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM)) - return false; - if (fail_page_alloc.ignore_gfp_reclaim && - (gfp_mask & __GFP_DIRECT_RECLAIM)) - return false; - - /* See comment in __should_failslab() */ - if (gfp_mask & __GFP_NOWARN) - flags |= FAULT_NOWARN; - - return should_fail_ex(&fail_page_alloc.attr, 1 << order, flags); -} - -#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS - -static int __init fail_page_alloc_debugfs(void) -{ - umode_t mode = S_IFREG | 0600; - struct dentry *dir; - - dir = fault_create_debugfs_attr("fail_page_alloc", NULL, - &fail_page_alloc.attr); - - debugfs_create_bool("ignore-gfp-wait", mode, dir, - &fail_page_alloc.ignore_gfp_reclaim); - debugfs_create_bool("ignore-gfp-highmem", mode, dir, - &fail_page_alloc.ignore_gfp_highmem); - debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order); - - return 0; -} - -late_initcall(fail_page_alloc_debugfs); - -#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */ - -#else /* CONFIG_FAIL_PAGE_ALLOC */ - -static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) -{ - return false; -} - -#endif /* CONFIG_FAIL_PAGE_ALLOC */ - noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order) { return __should_fail_alloc_page(gfp_mask, order); -- cgit v1.2.3 From 884c175f12ce1fabff18ac113349628149fc6cf2 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:16 +0800 Subject: mm: page_alloc: split out DEBUG_PAGEALLOC Move DEBUG_PAGEALLOC related functions into a single file to reduce a bit of page_alloc.c. 
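As with the FAIL_PAGE_ALLOC split above, only the out-of-line helpers move; the header keeps the fast path inline. A condensed sketch of the include/linux/mm.h idiom the hunks below install (editor's illustration that mirrors the diff, it does not add anything new):

#ifdef CONFIG_DEBUG_PAGEALLOC
/* Out-of-line helper now lives in mm/debug_page_alloc.c */
bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order,
		      int migratetype);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				  unsigned int order, int migratetype)
{
	/* Static key keeps the disabled case nearly free */
	if (!debug_guardpage_enabled())
		return false;
	return __set_page_guard(zone, page, order, migratetype);
}
#else
/* No-op stub so page_alloc.c callers stay #ifdef-free */
static inline bool set_page_guard(struct zone *zone, struct page *page,
				  unsigned int order, int migratetype) { return false; }
#endif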
Link: https://lkml.kernel.org/r/20230516063821.121844-9-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/mm.h | 76 +++++++++++++++++++++++++++++++++------------------ mm/Makefile | 1 + mm/debug_page_alloc.c | 59 +++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 69 ---------------------------------------------- 4 files changed, 109 insertions(+), 96 deletions(-) create mode 100644 mm/debug_page_alloc.c (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index f64bfbd53c65..2382eaf6fd81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -3471,9 +3471,58 @@ static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) if (debug_pagealloc_enabled_static()) __kernel_map_pages(page, numpages, 0); } + +extern unsigned int _debug_guardpage_minorder; +DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); + +static inline unsigned int debug_guardpage_minorder(void) +{ + return _debug_guardpage_minorder; +} + +static inline bool debug_guardpage_enabled(void) +{ + return static_branch_unlikely(&_debug_guardpage_enabled); +} + +static inline bool page_is_guard(struct page *page) +{ + if (!debug_guardpage_enabled()) + return false; + + return PageGuard(page); +} + +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype); +static inline bool set_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) +{ + if (!debug_guardpage_enabled()) + return false; + return __set_page_guard(zone, page, order, migratetype); +} + +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype); +static inline void clear_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) +{ + if (!debug_guardpage_enabled()) + return; + __clear_page_guard(zone, page, order, migratetype); +} + #else /* CONFIG_DEBUG_PAGEALLOC */ static inline void debug_pagealloc_map_pages(struct page *page, int numpages) {} static inline void debug_pagealloc_unmap_pages(struct page *page, int numpages) {} +static inline unsigned int debug_guardpage_minorder(void) { return 0; } +static inline bool debug_guardpage_enabled(void) { return false; } +static inline bool page_is_guard(struct page *page) { return false; } +static inline bool set_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) { return false; } +static inline void clear_page_guard(struct zone *zone, struct page *page, + unsigned int order, int migratetype) {} #endif /* CONFIG_DEBUG_PAGEALLOC */ #ifdef __HAVE_ARCH_GATE_AREA @@ -3711,33 +3760,6 @@ static inline bool vma_is_special_huge(const struct vm_area_struct *vma) #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */ -#ifdef CONFIG_DEBUG_PAGEALLOC -extern unsigned int _debug_guardpage_minorder; -DECLARE_STATIC_KEY_FALSE(_debug_guardpage_enabled); - -static inline unsigned int debug_guardpage_minorder(void) -{ - return _debug_guardpage_minorder; -} - -static inline bool debug_guardpage_enabled(void) -{ - return static_branch_unlikely(&_debug_guardpage_enabled); -} - -static inline bool page_is_guard(struct page *page) -{ - if (!debug_guardpage_enabled()) - return false; - - return PageGuard(page); -} -#else -static inline unsigned int debug_guardpage_minorder(void) { 
return 0; } -static inline bool debug_guardpage_enabled(void) { return false; } -static inline bool page_is_guard(struct page *page) { return false; } -#endif /* CONFIG_DEBUG_PAGEALLOC */ - #if MAX_NUMNODES > 1 void __init setup_nr_node_ids(void); #else diff --git a/mm/Makefile b/mm/Makefile index 0eec4bc72d3f..678530a07326 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -124,6 +124,7 @@ obj-$(CONFIG_SECRETMEM) += secretmem.o obj-$(CONFIG_CMA_SYSFS) += cma_sysfs.o obj-$(CONFIG_USERFAULTFD) += userfaultfd.o obj-$(CONFIG_IDLE_PAGE_TRACKING) += page_idle.o +obj-$(CONFIG_DEBUG_PAGEALLOC) += debug_page_alloc.o obj-$(CONFIG_DEBUG_PAGE_REF) += debug_page_ref.o obj-$(CONFIG_DAMON) += damon/ obj-$(CONFIG_HARDENED_USERCOPY) += usercopy.o diff --git a/mm/debug_page_alloc.c b/mm/debug_page_alloc.c new file mode 100644 index 000000000000..f9d145730fd1 --- /dev/null +++ b/mm/debug_page_alloc.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include + +unsigned int _debug_guardpage_minorder; + +bool _debug_pagealloc_enabled_early __read_mostly + = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); +EXPORT_SYMBOL(_debug_pagealloc_enabled_early); +DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); +EXPORT_SYMBOL(_debug_pagealloc_enabled); + +DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); + +static int __init early_debug_pagealloc(char *buf) +{ + return kstrtobool(buf, &_debug_pagealloc_enabled_early); +} +early_param("debug_pagealloc", early_debug_pagealloc); + +static int __init debug_guardpage_minorder_setup(char *buf) +{ + unsigned long res; + + if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { + pr_err("Bad debug_guardpage_minorder value\n"); + return 0; + } + _debug_guardpage_minorder = res; + pr_info("Setting debug_guardpage_minorder to %lu\n", res); + return 0; +} +early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); + +bool __set_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype) +{ + if (order >= debug_guardpage_minorder()) + return false; + + __SetPageGuard(page); + INIT_LIST_HEAD(&page->buddy_list); + set_page_private(page, order); + /* Guard pages are not available for any usage */ + if (!is_migrate_isolate(migratetype)) + __mod_zone_freepage_state(zone, -(1 << order), migratetype); + + return true; +} + +void __clear_page_guard(struct zone *zone, struct page *page, unsigned int order, + int migratetype) +{ + __ClearPageGuard(page); + + set_page_private(page, 0); + if (!is_migrate_isolate(migratetype)) + __mod_zone_freepage_state(zone, (1 << order), migratetype); +} diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2b8e4a086c3d..40fa763c5074 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -664,75 +664,6 @@ void destroy_large_folio(struct folio *folio) compound_page_dtors[dtor](&folio->page); } -#ifdef CONFIG_DEBUG_PAGEALLOC -unsigned int _debug_guardpage_minorder; - -bool _debug_pagealloc_enabled_early __read_mostly - = IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT); -EXPORT_SYMBOL(_debug_pagealloc_enabled_early); -DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled); -EXPORT_SYMBOL(_debug_pagealloc_enabled); - -DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled); - -static int __init early_debug_pagealloc(char *buf) -{ - return kstrtobool(buf, &_debug_pagealloc_enabled_early); -} -early_param("debug_pagealloc", early_debug_pagealloc); - -static int __init debug_guardpage_minorder_setup(char *buf) -{ - unsigned long res; - - if (kstrtoul(buf, 10, &res) < 0 || res > MAX_ORDER / 2) { - pr_err("Bad 
debug_guardpage_minorder value\n"); - return 0; - } - _debug_guardpage_minorder = res; - pr_info("Setting debug_guardpage_minorder to %lu\n", res); - return 0; -} -early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup); - -static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) -{ - if (!debug_guardpage_enabled()) - return false; - - if (order >= debug_guardpage_minorder()) - return false; - - __SetPageGuard(page); - INIT_LIST_HEAD(&page->buddy_list); - set_page_private(page, order); - /* Guard pages are not available for any usage */ - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, -(1 << order), migratetype); - - return true; -} - -static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) -{ - if (!debug_guardpage_enabled()) - return; - - __ClearPageGuard(page); - - set_page_private(page, 0); - if (!is_migrate_isolate(migratetype)) - __mod_zone_freepage_state(zone, (1 << order), migratetype); -} -#else -static inline bool set_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) { return false; } -static inline void clear_page_guard(struct zone *zone, struct page *page, - unsigned int order, int migratetype) {} -#endif - static inline void set_buddy_order(struct page *page, unsigned int order) { set_page_private(page, order); -- cgit v1.2.3 From 31a1b9d7fe768db521b12287ec6426983e9787e3 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:17 +0800 Subject: mm: page_alloc: move mark_free_page() into snapshot.c The mark_free_page() is only used in kernel/power/snapshot.c, move it out to reduce a bit of page_alloc.c Link: https://lkml.kernel.org/r/20230516063821.121844-10-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/suspend.h | 3 --- kernel/power/snapshot.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++ mm/page_alloc.c | 55 ------------------------------------------------- 3 files changed, 52 insertions(+), 58 deletions(-) (limited to 'mm') diff --git a/include/linux/suspend.h b/include/linux/suspend.h index d0d4598a7b3f..3950a7bf33ae 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -364,9 +364,6 @@ struct pbe { struct pbe *next; }; -/* mm/page_alloc.c */ -extern void mark_free_pages(struct zone *zone); - /** * struct platform_hibernation_ops - hibernation platform support * diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index cd8b7b35f1e8..45ef0bf81c85 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c @@ -1228,6 +1228,58 @@ unsigned int snapshot_additional_pages(struct zone *zone) return 2 * rtree; } +/* + * Touch the watchdog for every WD_PAGE_COUNT pages. 
+ */ +#define WD_PAGE_COUNT (128*1024) + +static void mark_free_pages(struct zone *zone) +{ + unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; + unsigned long flags; + unsigned int order, t; + struct page *page; + + if (zone_is_empty(zone)) + return; + + spin_lock_irqsave(&zone->lock, flags); + + max_zone_pfn = zone_end_pfn(zone); + for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) + if (pfn_valid(pfn)) { + page = pfn_to_page(pfn); + + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + + if (page_zone(page) != zone) + continue; + + if (!swsusp_page_is_forbidden(page)) + swsusp_unset_page_free(page); + } + + for_each_migratetype_order(order, t) { + list_for_each_entry(page, + &zone->free_area[order].free_list[t], buddy_list) { + unsigned long i; + + pfn = page_to_pfn(page); + for (i = 0; i < (1UL << order); i++) { + if (!--page_count) { + touch_nmi_watchdog(); + page_count = WD_PAGE_COUNT; + } + swsusp_set_page_free(pfn_to_page(pfn + i)); + } + } + } + spin_unlock_irqrestore(&zone->lock, flags); +} + #ifdef CONFIG_HIGHMEM /** * count_free_highmem_pages - Compute the total number of free highmem pages. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 40fa763c5074..8d306203e555 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2365,61 +2365,6 @@ void drain_all_pages(struct zone *zone) __drain_all_pages(zone, false); } -#ifdef CONFIG_HIBERNATION - -/* - * Touch the watchdog for every WD_PAGE_COUNT pages. - */ -#define WD_PAGE_COUNT (128*1024) - -void mark_free_pages(struct zone *zone) -{ - unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT; - unsigned long flags; - unsigned int order, t; - struct page *page; - - if (zone_is_empty(zone)) - return; - - spin_lock_irqsave(&zone->lock, flags); - - max_zone_pfn = zone_end_pfn(zone); - for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) - if (pfn_valid(pfn)) { - page = pfn_to_page(pfn); - - if (!--page_count) { - touch_nmi_watchdog(); - page_count = WD_PAGE_COUNT; - } - - if (page_zone(page) != zone) - continue; - - if (!swsusp_page_is_forbidden(page)) - swsusp_unset_page_free(page); - } - - for_each_migratetype_order(order, t) { - list_for_each_entry(page, - &zone->free_area[order].free_list[t], buddy_list) { - unsigned long i; - - pfn = page_to_pfn(page); - for (i = 0; i < (1UL << order); i++) { - if (!--page_count) { - touch_nmi_watchdog(); - page_count = WD_PAGE_COUNT; - } - swsusp_set_page_free(pfn_to_page(pfn + i)); - } - } - } - spin_unlock_irqrestore(&zone->lock, flags); -} -#endif /* CONFIG_PM */ - static bool free_unref_page_prepare(struct page *page, unsigned long pfn, unsigned int order) { -- cgit v1.2.3 From 07f44ac3c90c50a201307d3fe4dda120ee8394f5 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:18 +0800 Subject: mm: page_alloc: move pm_* function into power pm_restrict_gfp_mask()/pm_restore_gfp_mask() only used in power, let's move them out of page_alloc.c. Adding a general gfp_has_io_fs() function which return true if gfp with both __GFP_IO and __GFP_FS flags, then use it inside of pm_suspended_storage(), also the pm_suspended_storage() is moved into suspend.h. Link: https://lkml.kernel.org/r/20230516063821.121844-11-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- include/linux/gfp.h | 15 ++++----------- include/linux/suspend.h | 6 ++++++ kernel/power/main.c | 27 +++++++++++++++++++++++++++ kernel/power/power.h | 5 +++++ mm/page_alloc.c | 38 -------------------------------------- mm/swapfile.c | 1 + 6 files changed, 43 insertions(+), 49 deletions(-) (limited to 'mm') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index ed8cb537c6a7..665f06675c83 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -338,19 +338,12 @@ extern gfp_t gfp_allowed_mask; /* Returns true if the gfp_mask allows use of ALLOC_NO_WATERMARK */ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask); -extern void pm_restrict_gfp_mask(void); -extern void pm_restore_gfp_mask(void); - -extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); - -#ifdef CONFIG_PM_SLEEP -extern bool pm_suspended_storage(void); -#else -static inline bool pm_suspended_storage(void) +static inline bool gfp_has_io_fs(gfp_t gfp) { - return false; + return (gfp & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS); } -#endif /* CONFIG_PM_SLEEP */ + +extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC /* The below functions must be run on a range from a single zone. */ diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3950a7bf33ae..76923051c03d 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h @@ -502,6 +502,11 @@ extern void pm_report_max_hw_sleep(u64 t); extern bool events_check_enabled; extern suspend_state_t pm_suspend_target_state; +static inline bool pm_suspended_storage(void) +{ + return !gfp_has_io_fs(gfp_allowed_mask); +} + extern bool pm_wakeup_pending(void); extern void pm_system_wakeup(void); extern void pm_system_cancel_wakeup(void); @@ -535,6 +540,7 @@ static inline void ksys_sync_helper(void) {} #define pm_notifier(fn, pri) do { (void)(fn); } while (0) +static inline bool pm_suspended_storage(void) { return false; } static inline bool pm_wakeup_pending(void) { return false; } static inline void pm_system_wakeup(void) {} static inline void pm_wakeup_clear(bool reset) {} diff --git a/kernel/power/main.c b/kernel/power/main.c index 3113ec2f1db4..34fc8359145b 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -21,6 +21,33 @@ #include "power.h" #ifdef CONFIG_PM_SLEEP +/* + * The following functions are used by the suspend/hibernate code to temporarily + * change gfp_allowed_mask in order to avoid using I/O during memory allocations + * while devices are suspended. To avoid races with the suspend/hibernate code, + * they should always be called with system_transition_mutex held + * (gfp_allowed_mask also should only be modified with system_transition_mutex + * held, unless the suspend/hibernate code is guaranteed not to run in parallel + * with that modification). 
+ */ +static gfp_t saved_gfp_mask; + +void pm_restore_gfp_mask(void) +{ + WARN_ON(!mutex_is_locked(&system_transition_mutex)); + if (saved_gfp_mask) { + gfp_allowed_mask = saved_gfp_mask; + saved_gfp_mask = 0; + } +} + +void pm_restrict_gfp_mask(void) +{ + WARN_ON(!mutex_is_locked(&system_transition_mutex)); + WARN_ON(saved_gfp_mask); + saved_gfp_mask = gfp_allowed_mask; + gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); +} unsigned int lock_system_sleep(void) { diff --git a/kernel/power/power.h b/kernel/power/power.h index b83c8d5e188d..ac14d1b463d1 100644 --- a/kernel/power/power.h +++ b/kernel/power/power.h @@ -216,6 +216,11 @@ static inline void suspend_test_finish(const char *label) {} /* kernel/power/main.c */ extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down); extern int pm_notifier_call_chain(unsigned long val); +void pm_restrict_gfp_mask(void); +void pm_restore_gfp_mask(void); +#else +static inline void pm_restrict_gfp_mask(void) {} +static inline void pm_restore_gfp_mask(void) {} #endif #ifdef CONFIG_HIGHMEM diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 8d306203e555..005aa0202ae0 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -227,44 +227,6 @@ static inline void set_pcppage_migratetype(struct page *page, int migratetype) page->index = migratetype; } -#ifdef CONFIG_PM_SLEEP -/* - * The following functions are used by the suspend/hibernate code to temporarily - * change gfp_allowed_mask in order to avoid using I/O during memory allocations - * while devices are suspended. To avoid races with the suspend/hibernate code, - * they should always be called with system_transition_mutex held - * (gfp_allowed_mask also should only be modified with system_transition_mutex - * held, unless the suspend/hibernate code is guaranteed not to run in parallel - * with that modification). - */ - -static gfp_t saved_gfp_mask; - -void pm_restore_gfp_mask(void) -{ - WARN_ON(!mutex_is_locked(&system_transition_mutex)); - if (saved_gfp_mask) { - gfp_allowed_mask = saved_gfp_mask; - saved_gfp_mask = 0; - } -} - -void pm_restrict_gfp_mask(void) -{ - WARN_ON(!mutex_is_locked(&system_transition_mutex)); - WARN_ON(saved_gfp_mask); - saved_gfp_mask = gfp_allowed_mask; - gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS); -} - -bool pm_suspended_storage(void) -{ - if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) - return false; - return true; -} -#endif /* CONFIG_PM_SLEEP */ - #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE unsigned int pageblock_order __read_mostly; #endif diff --git a/mm/swapfile.c b/mm/swapfile.c index 274bbf797480..c74259001d5e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include -- cgit v1.2.3 From 5221b5a89340f63dafa4d5f38537efb1ad506f15 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:19 +0800 Subject: mm: vmscan: use gfp_has_io_fs() Use gfp_has_io_fs() instead of open-code. Link: https://lkml.kernel.org/r/20230516063821.121844-12-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 6d0cd2840cf0..15efbfbb1963 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2458,7 +2458,7 @@ static int too_many_isolated(struct pglist_data *pgdat, int file, * won't get blocked by normal direct-reclaimers, forming a circular * deadlock. */ - if ((sc->gfp_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS)) + if (gfp_has_io_fs(sc->gfp_mask)) inactive >>= 3; too_many = isolated > inactive; -- cgit v1.2.3 From e95d372c4cd46b6ec4eeacc07adcb7260ab4cfa0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:20 +0800 Subject: mm: page_alloc: move sysctls into it own fils This moves all page alloc related sysctls to its own file, as part of the kernel/sysctl.c spring cleaning, also move some functions declarations from mm.h into internal.h. Link: https://lkml.kernel.org/r/20230516063821.121844-13-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. Wysocki Signed-off-by: Andrew Morton --- include/linux/mm.h | 11 ------ include/linux/mmzone.h | 21 ---------- kernel/sysctl.c | 67 -------------------------------- mm/internal.h | 11 ++++++ mm/mm_init.c | 2 + mm/page_alloc.c | 103 ++++++++++++++++++++++++++++++++++++++++++------- 6 files changed, 102 insertions(+), 113 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 2382eaf6fd81..6d7e03d83da7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2994,12 +2994,6 @@ extern int __meminit early_pfn_to_nid(unsigned long pfn); #endif extern void set_dma_reserve(unsigned long new_dma_reserve); -extern void memmap_init_range(unsigned long, int, unsigned long, - unsigned long, unsigned long, enum meminit_context, - struct vmem_altmap *, int migratetype); -extern void setup_per_zone_wmarks(void); -extern void calculate_min_free_kbytes(void); -extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); extern void __init mmap_init(void); @@ -3020,11 +3014,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...); extern void setup_per_cpu_pageset(void); -/* page_alloc.c */ -extern int min_free_kbytes; -extern int watermark_boost_factor; -extern int watermark_scale_factor; - /* nommu.c */ extern atomic_long_t mmap_pages_allocated; extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index a4889c9d4055..3a68326c9989 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -1512,27 +1512,6 @@ static inline bool has_managed_dma(void) } #endif -/* These two functions are used to setup the per zone pages min values */ -struct ctl_table; - -int min_free_kbytes_sysctl_handler(struct ctl_table *, int, void *, size_t *, - loff_t *); -int watermark_scale_factor_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES]; -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, void *, - size_t *, loff_t *); -int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int 
sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -int numa_zonelist_order_handler(struct ctl_table *, int, - void *, size_t *, loff_t *); -extern int percpu_pagelist_high_fraction; -extern char numa_zonelist_order[]; -#define NUMA_ZONELIST_ORDER_LEN 16 #ifndef CONFIG_NUMA diff --git a/kernel/sysctl.c b/kernel/sysctl.c index bfe53e835524..a57de67f032f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -2119,13 +2119,6 @@ static struct ctl_table vm_table[] = { .extra2 = SYSCTL_ONE, }, #endif - { - .procname = "lowmem_reserve_ratio", - .data = &sysctl_lowmem_reserve_ratio, - .maxlen = sizeof(sysctl_lowmem_reserve_ratio), - .mode = 0644, - .proc_handler = lowmem_reserve_ratio_sysctl_handler, - }, { .procname = "drop_caches", .data = &sysctl_drop_caches, @@ -2135,39 +2128,6 @@ static struct ctl_table vm_table[] = { .extra1 = SYSCTL_ONE, .extra2 = SYSCTL_FOUR, }, - { - .procname = "min_free_kbytes", - .data = &min_free_kbytes, - .maxlen = sizeof(min_free_kbytes), - .mode = 0644, - .proc_handler = min_free_kbytes_sysctl_handler, - .extra1 = SYSCTL_ZERO, - }, - { - .procname = "watermark_boost_factor", - .data = &watermark_boost_factor, - .maxlen = sizeof(watermark_boost_factor), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = SYSCTL_ZERO, - }, - { - .procname = "watermark_scale_factor", - .data = &watermark_scale_factor, - .maxlen = sizeof(watermark_scale_factor), - .mode = 0644, - .proc_handler = watermark_scale_factor_sysctl_handler, - .extra1 = SYSCTL_ONE, - .extra2 = SYSCTL_THREE_THOUSAND, - }, - { - .procname = "percpu_pagelist_high_fraction", - .data = &percpu_pagelist_high_fraction, - .maxlen = sizeof(percpu_pagelist_high_fraction), - .mode = 0644, - .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, - .extra1 = SYSCTL_ZERO, - }, { .procname = "page_lock_unfairness", .data = &sysctl_page_lock_unfairness, @@ -2223,24 +2183,6 @@ static struct ctl_table vm_table[] = { .proc_handler = proc_dointvec_minmax, .extra1 = SYSCTL_ZERO, }, - { - .procname = "min_unmapped_ratio", - .data = &sysctl_min_unmapped_ratio, - .maxlen = sizeof(sysctl_min_unmapped_ratio), - .mode = 0644, - .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE_HUNDRED, - }, - { - .procname = "min_slab_ratio", - .data = &sysctl_min_slab_ratio, - .maxlen = sizeof(sysctl_min_slab_ratio), - .mode = 0644, - .proc_handler = sysctl_min_slab_ratio_sysctl_handler, - .extra1 = SYSCTL_ZERO, - .extra2 = SYSCTL_ONE_HUNDRED, - }, #endif #ifdef CONFIG_SMP { @@ -2267,15 +2209,6 @@ static struct ctl_table vm_table[] = { .proc_handler = mmap_min_addr_handler, }, #endif -#ifdef CONFIG_NUMA - { - .procname = "numa_zonelist_order", - .data = &numa_zonelist_order, - .maxlen = NUMA_ZONELIST_ORDER_LEN, - .mode = 0644, - .proc_handler = numa_zonelist_order_handler, - }, -#endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { diff --git a/mm/internal.h b/mm/internal.h index c99da2cfac71..66d7ddf7e211 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -213,6 +213,13 @@ static inline bool is_check_pages_enabled(void) return static_branch_unlikely(&check_pages_enabled); } +extern int min_free_kbytes; + +void setup_per_zone_wmarks(void); +void calculate_min_free_kbytes(void); +int __meminit init_per_zone_wmark_min(void); +void page_alloc_sysctl_init(void); + /* * Structure for holding the mostly immutable allocation parameters passed * between functions involved in 
allocations, including the alloc_pages* @@ -423,6 +430,10 @@ extern void *memmap_alloc(phys_addr_t size, phys_addr_t align, phys_addr_t min_addr, int nid, bool exact_nid); +void memmap_init_range(unsigned long, int, unsigned long, unsigned long, + unsigned long, enum meminit_context, struct vmem_altmap *, int); + + int split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset); diff --git a/mm/mm_init.c b/mm/mm_init.c index 0fd4ddfdfb2e..10bf560302c4 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2392,6 +2392,8 @@ void __init page_alloc_init_late(void) /* Initialize page ext after all struct pages are initialized. */ if (deferred_struct_pages) page_ext_init(); + + page_alloc_sysctl_init(); } #ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 005aa0202ae0..d19a05264125 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -206,7 +206,6 @@ nodemask_t node_states[NR_NODE_STATES] __read_mostly = { }; EXPORT_SYMBOL(node_states); -int percpu_pagelist_high_fraction; gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK; /* @@ -302,8 +301,8 @@ compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { int min_free_kbytes = 1024; int user_min_free_kbytes = -1; -int watermark_boost_factor __read_mostly = 15000; -int watermark_scale_factor = 10; +static int watermark_boost_factor __read_mostly = 15000; +static int watermark_scale_factor = 10; /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ int movable_zone; @@ -4880,12 +4879,12 @@ static int __parse_numa_zonelist_order(char *s) return 0; } -char numa_zonelist_order[] = "Node"; - +static char numa_zonelist_order[] = "Node"; +#define NUMA_ZONELIST_ORDER_LEN 16 /* * sysctl handler for numa_zonelist_order */ -int numa_zonelist_order_handler(struct ctl_table *table, int write, +static int numa_zonelist_order_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { if (write) @@ -4893,7 +4892,6 @@ int numa_zonelist_order_handler(struct ctl_table *table, int write, return proc_dostring(table, write, buffer, length, ppos); } - static int node_load[MAX_NUMNODES]; /** @@ -5296,6 +5294,7 @@ static int zone_batchsize(struct zone *zone) #endif } +static int percpu_pagelist_high_fraction; static int zone_highsize(struct zone *zone, int batch, int cpu_online) { #ifdef CONFIG_MMU @@ -5825,7 +5824,7 @@ postcore_initcall(init_per_zone_wmark_min) * that we can call two helper functions whenever min_free_kbytes * changes. 
*/ -int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, +static int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5841,7 +5840,7 @@ int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write, return 0; } -int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, +static int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5871,7 +5870,7 @@ static void setup_min_unmapped_ratio(void) } -int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, +static int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5898,7 +5897,7 @@ static void setup_min_slab_ratio(void) sysctl_min_slab_ratio) / 100; } -int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, +static int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { int rc; @@ -5922,8 +5921,8 @@ int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write, * minimum watermarks. The lowmem reserve ratio can only make sense * if in function of the boot time zone sizes. */ -int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, - void *buffer, size_t *length, loff_t *ppos) +static int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, + int write, void *buffer, size_t *length, loff_t *ppos) { int i; @@ -5943,7 +5942,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write, * cpu. It is the fraction of total pages in each zone that a hot per cpu * pagelist can have before it gets flushed back to buddy allocator. 
*/ -int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, +static int percpu_pagelist_high_fraction_sysctl_handler(struct ctl_table *table, int write, void *buffer, size_t *length, loff_t *ppos) { struct zone *zone; @@ -5976,6 +5975,82 @@ out: return ret; } +static struct ctl_table page_alloc_sysctl_table[] = { + { + .procname = "min_free_kbytes", + .data = &min_free_kbytes, + .maxlen = sizeof(min_free_kbytes), + .mode = 0644, + .proc_handler = min_free_kbytes_sysctl_handler, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "watermark_boost_factor", + .data = &watermark_boost_factor, + .maxlen = sizeof(watermark_boost_factor), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "watermark_scale_factor", + .data = &watermark_scale_factor, + .maxlen = sizeof(watermark_scale_factor), + .mode = 0644, + .proc_handler = watermark_scale_factor_sysctl_handler, + .extra1 = SYSCTL_ONE, + .extra2 = SYSCTL_THREE_THOUSAND, + }, + { + .procname = "percpu_pagelist_high_fraction", + .data = &percpu_pagelist_high_fraction, + .maxlen = sizeof(percpu_pagelist_high_fraction), + .mode = 0644, + .proc_handler = percpu_pagelist_high_fraction_sysctl_handler, + .extra1 = SYSCTL_ZERO, + }, + { + .procname = "lowmem_reserve_ratio", + .data = &sysctl_lowmem_reserve_ratio, + .maxlen = sizeof(sysctl_lowmem_reserve_ratio), + .mode = 0644, + .proc_handler = lowmem_reserve_ratio_sysctl_handler, + }, +#ifdef CONFIG_NUMA + { + .procname = "numa_zonelist_order", + .data = &numa_zonelist_order, + .maxlen = NUMA_ZONELIST_ORDER_LEN, + .mode = 0644, + .proc_handler = numa_zonelist_order_handler, + }, + { + .procname = "min_unmapped_ratio", + .data = &sysctl_min_unmapped_ratio, + .maxlen = sizeof(sysctl_min_unmapped_ratio), + .mode = 0644, + .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, + { + .procname = "min_slab_ratio", + .data = &sysctl_min_slab_ratio, + .maxlen = sizeof(sysctl_min_slab_ratio), + .mode = 0644, + .proc_handler = sysctl_min_slab_ratio_sysctl_handler, + .extra1 = SYSCTL_ZERO, + .extra2 = SYSCTL_ONE_HUNDRED, + }, +#endif + {} +}; + +void __init page_alloc_sysctl_init(void) +{ + register_sysctl_init("vm", page_alloc_sysctl_table); +} + #ifdef CONFIG_CONTIG_ALLOC /* Usage: See admin-guide/dynamic-debug-howto.rst */ static void alloc_contig_dump_pages(struct list_head *page_list) -- cgit v1.2.3 From ecbb490d8ee38fd84a0d682282589ff723dc62c0 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Tue, 16 May 2023 14:38:21 +0800 Subject: mm: page_alloc: move is_check_pages_enabled() into page_alloc.c The is_check_pages_enabled() only used in page_alloc.c, move it into page_alloc.c, also use it in free_tail_page_prepare(). Link: https://lkml.kernel.org/r/20230516063821.121844-14-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Iurii Zaikin Cc: Kees Cook Cc: Len Brown Cc: Luis Chamberlain Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Cc: Pavel Machek Cc: Rafael J. 
Wysocki Signed-off-by: Andrew Morton --- mm/internal.h | 5 ----- mm/page_alloc.c | 7 ++++++- 2 files changed, 6 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 66d7ddf7e211..ec55da813c13 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -208,11 +208,6 @@ extern char * const zone_names[MAX_NR_ZONES]; /* perform sanity checks on struct pages being allocated or freed */ DECLARE_STATIC_KEY_MAYBE(CONFIG_DEBUG_VM, check_pages_enabled); -static inline bool is_check_pages_enabled(void) -{ - return static_branch_unlikely(&check_pages_enabled); -} - extern int min_free_kbytes; void setup_per_zone_wmarks(void); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d19a05264125..ee23ba9c0ca7 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -983,6 +983,11 @@ static inline bool free_page_is_bad(struct page *page) return true; } +static inline bool is_check_pages_enabled(void) +{ + return static_branch_unlikely(&check_pages_enabled); +} + static int free_tail_page_prepare(struct page *head_page, struct page *page) { struct folio *folio = (struct folio *)head_page; @@ -994,7 +999,7 @@ static int free_tail_page_prepare(struct page *head_page, struct page *page) */ BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1); - if (!static_branch_unlikely(&check_pages_enabled)) { + if (!is_check_pages_enabled()) { ret = 0; goto out; } -- cgit v1.2.3 From 54d020692b342f7bd02d7f5795fb5c401caecfcc Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:33 +0100 Subject: mm/gup: remove unused vmas parameter from get_user_pages() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "remove the vmas parameter from GUP APIs", v6. (pin_/get)_user_pages[_remote]() each provide an optional output parameter for an array of VMA objects associated with each page in the input range. These provide the means for VMAs to be returned, as long as mm->mmap_lock is never released during the GUP operation (i.e. the internal flag FOLL_UNLOCKABLE is not specified). In addition, these VMAs can only be accessed with the mmap_lock held and become invalidated the moment it is released. The vast majority of invocations do not use this functionality and of those that do, all but one case retrieve a single VMA to perform checks upon. It is not egregious in the single VMA cases to simply replace the operation with a vma_lookup(). In these cases we duplicate the (fast) lookup on a slow path already under the mmap_lock, abstracted to a new get_user_page_vma_remote() inline helper function which also performs error checking and reference count maintenance. The special case is io_uring, where io_pin_pages() specifically needs to assert that the VMAs underlying the range do not result in broken long-term GUP file-backed mappings. As GUP now internally asserts that FOLL_LONGTERM mappings are not file-backed in a broken fashion (i.e. requiring dirty tracking) - as implemented in "mm/gup: disallow FOLL_LONGTERM GUP-nonfast writing to file-backed mappings" - this logic is no longer required and so we can simply remove it altogether from io_uring. Eliminating the vmas parameter eliminates an entire class of danging pointer errors that might have occured should the lock have been incorrectly released. In addition, the API is simplified and now clearly expresses what it is intended for - applying the specified GUP flags and (if pinning) returning pinned pages. 
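For the common single-page callers the conversion is mechanical. A minimal sketch of the simplified calling convention (editor's illustration; the function and local names here are made up, the get_user_pages() signature is the one introduced by this patch, and the caller must hold mmap_lock and later drop the page with put_page()):

static int example_get_one_page(unsigned long uaddr, struct page **pagep)
{
	long ret;

	mmap_read_lock(current->mm);
	ret = get_user_pages(uaddr, 1, FOLL_WRITE, pagep);	/* no vmas argument */
	mmap_read_unlock(current->mm);

	return ret == 1 ? 0 : -EFAULT;
}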
This change additionally opens the door to further potential improvements in GUP and the possible marrying of disparate code paths. I have run this series against gup_test with no issues. Thanks to Matthew Wilcox for suggesting this refactoring! This patch (of 6): No invocation of get_user_pages() use the vmas parameter, so remove it. The GUP API is confusing and caveated. Recent changes have done much to improve that, however there is more we can do. Exporting vmas is a prime target as the caller has to be extremely careful to preclude their use after the mmap_lock has expired or otherwise be left with dangling pointers. Removing the vmas parameter focuses the GUP functions upon their primary purpose - pinning (and outputting) pages as well as performing the actions implied by the input flags. This is part of a patch series aiming to remove the vmas parameter altogether. Link: https://lkml.kernel.org/r/cover.1684350871.git.lstoakes@gmail.com Link: https://lkml.kernel.org/r/589e0c64794668ffc799651e8d85e703262b1e9d.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: Matthew Wilcox (Oracle) Acked-by: Greg Kroah-Hartman Acked-by: David Hildenbrand Reviewed-by: Jason Gunthorpe Acked-by: Christian König (for radeon parts) Acked-by: Jarkko Sakkinen Reviewed-by: Christoph Hellwig Acked-by: Sean Christopherson (KVM) Cc: Catalin Marinas Cc: Dennis Dalessandro Cc: Janosch Frank Cc: Jens Axboe Cc: Sakari Ailus Signed-off-by: Andrew Morton --- arch/x86/kernel/cpu/sgx/ioctl.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/misc/sgi-gru/grufault.c | 2 +- include/linux/mm.h | 3 +-- mm/gup.c | 9 +++------ mm/gup_test.c | 5 ++--- virt/kvm/kvm_main.c | 2 +- 7 files changed, 10 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c index 21ca0a831b70..5d390df21440 100644 --- a/arch/x86/kernel/cpu/sgx/ioctl.c +++ b/arch/x86/kernel/cpu/sgx/ioctl.c @@ -214,7 +214,7 @@ static int __sgx_encl_add_page(struct sgx_encl *encl, if (!(vma->vm_flags & VM_MAYEXEC)) return -EACCES; - ret = get_user_pages(src, 1, 0, &src_page, NULL); + ret = get_user_pages(src, 1, 0, &src_page); if (ret < 1) return -EFAULT; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 2220cdf6a3f6..3a9db030f98f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -359,7 +359,7 @@ static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm struct page **pages = ttm->pages + pinned; r = get_user_pages(userptr, num_pages, write ? FOLL_WRITE : 0, - pages, NULL); + pages); if (r < 0) goto release_pages; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index b836936e9747..378cf02a2aa1 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -185,7 +185,7 @@ static int non_atomic_pte_lookup(struct vm_area_struct *vma, #else *pageshift = PAGE_SHIFT; #endif - if (get_user_pages(vaddr, 1, write ? FOLL_WRITE : 0, &page, NULL) <= 0) + if (get_user_pages(vaddr, 1, write ? 
FOLL_WRITE : 0, &page) <= 0) return -EFAULT; *paddr = page_to_phys(page); put_page(page); diff --git a/include/linux/mm.h b/include/linux/mm.h index 6d7e03d83da7..6336253c18e2 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2369,8 +2369,7 @@ long pin_user_pages_remote(struct mm_struct *mm, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas); diff --git a/mm/gup.c b/mm/gup.c index e19b06a66229..21daeee5f163 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2251,8 +2251,6 @@ long get_user_pages_remote(struct mm_struct *mm, * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * * This is the same as get_user_pages_remote(), just with a less-flexible * calling convention where we assume that the mm being operated on belongs to @@ -2260,16 +2258,15 @@ long get_user_pages_remote(struct mm_struct *mm, * obviously don't pass FOLL_REMOTE in here. */ long get_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + unsigned int gup_flags, struct page **pages) { int locked = 1; - if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_TOUCH)) + if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_TOUCH)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - vmas, &locked, gup_flags); + NULL, &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages); diff --git a/mm/gup_test.c b/mm/gup_test.c index 8ae7307a1bb6..9ba8ea23f84e 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -139,8 +139,7 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i); break; case GUP_BASIC_TEST: - nr = get_user_pages(addr, nr, gup->gup_flags, pages + i, - NULL); + nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_FAST_BENCHMARK: nr = pin_user_pages_fast(addr, nr, gup->gup_flags, @@ -161,7 +160,7 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i, NULL); else nr = get_user_pages(addr, nr, gup->gup_flags, - pages + i, NULL); + pages + i); break; default: ret = -EINVAL; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 479802a892d4..51e4882d0873 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2477,7 +2477,7 @@ static inline int check_user_page_hwpoison(unsigned long addr) { int rc, flags = FOLL_HWPOISON | FOLL_WRITE; - rc = get_user_pages(addr, 1, flags, NULL, NULL); + rc = get_user_pages(addr, 1, flags, NULL); return rc == -EHWPOISON; } -- cgit v1.2.3 From 0b295316b3a9b7858eafbebdc31b4827a6edde03 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:36 +0100 Subject: mm/gup: remove unused vmas parameter from pin_user_pages_remote() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No invocation of pin_user_pages_remote() uses the vmas parameter, so remove it. This forms part of a larger patch set eliminating the use of the vmas parameters altogether. 
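A minimal sketch of the resulting call (editor's illustration patterned on the process_vm_access.c hunk below; the wrapper and its locals are made up, the pin_user_pages_remote() signature is the one introduced here):

static long example_pin_remote(struct mm_struct *mm, unsigned long start,
			       unsigned long nr_pages, struct page **pages)
{
	int locked = 1;
	long pinned;

	mmap_read_lock(mm);
	pinned = pin_user_pages_remote(mm, start, nr_pages, FOLL_WRITE,
				       pages, &locked);	/* vmas parameter gone */
	if (locked)	/* GUP may have dropped the lock on a fault retry */
		mmap_read_unlock(mm);

	return pinned;	/* release with unpin_user_pages() when done */
}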
Link: https://lkml.kernel.org/r/28f000beb81e45bf538a2aaa77c90f5482b67a32.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Reviewed-by: Jason Gunthorpe Reviewed-by: Christoph Hellwig Cc: Catalin Marinas Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- drivers/iommu/iommufd/pages.c | 4 ++-- drivers/vfio/vfio_iommu_type1.c | 2 +- include/linux/mm.h | 2 +- kernel/trace/trace_events_user.c | 2 +- mm/gup.c | 8 +++----- mm/process_vm_access.c | 2 +- 6 files changed, 9 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index 3c47846cc5ef..412ca96be128 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -786,7 +786,7 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user, user->locked = 1; } rc = pin_user_pages_remote(pages->source_mm, uptr, npages, - user->gup_flags, user->upages, NULL, + user->gup_flags, user->upages, &user->locked); } if (rc <= 0) { @@ -1799,7 +1799,7 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, rc = pin_user_pages_remote( pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE), 1, (flags & IOMMUFD_ACCESS_RW_WRITE) ? FOLL_WRITE : 0, &page, - NULL, NULL); + NULL); mmap_read_unlock(pages->source_mm); if (rc != 1) { if (WARN_ON(rc >= 0)) diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 0d2f805468e1..306e6f1d1c70 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -562,7 +562,7 @@ static int vaddr_get_pfns(struct mm_struct *mm, unsigned long vaddr, mmap_read_lock(mm); ret = pin_user_pages_remote(mm, vaddr, npages, flags | FOLL_LONGTERM, - pages, NULL, NULL); + pages, NULL); if (ret > 0) { int i; diff --git a/include/linux/mm.h b/include/linux/mm.h index 6336253c18e2..cf17ffdf4fbf 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2367,7 +2367,7 @@ long get_user_pages_remote(struct mm_struct *mm, long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); + int *locked); long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, diff --git a/kernel/trace/trace_events_user.c b/kernel/trace/trace_events_user.c index b1ecd7677642..bdc2666e8d39 100644 --- a/kernel/trace/trace_events_user.c +++ b/kernel/trace/trace_events_user.c @@ -406,7 +406,7 @@ static int user_event_enabler_write(struct user_event_mm *mm, return -EBUSY; ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT, - &page, NULL, NULL); + &page, NULL); if (unlikely(ret <= 0)) { if (!fixup_fault) diff --git a/mm/gup.c b/mm/gup.c index 21daeee5f163..edf0fe2695b0 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3100,8 +3100,6 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast); * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. 
* @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. @@ -3116,14 +3114,14 @@ EXPORT_SYMBOL_GPL(pin_user_pages_fast); long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { int local_locked = 1; - if (!is_valid_gup_args(pages, vmas, locked, &gup_flags, + if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; - return __gup_longterm_locked(mm, start, nr_pages, pages, vmas, + return __gup_longterm_locked(mm, start, nr_pages, pages, NULL, locked ? locked : &local_locked, gup_flags); } diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c index 78dfaf9e8990..0523edab03a6 100644 --- a/mm/process_vm_access.c +++ b/mm/process_vm_access.c @@ -104,7 +104,7 @@ static int process_vm_rw_single_vec(unsigned long addr, mmap_read_lock(mm); pinned_pages = pin_user_pages_remote(mm, pa, pinned_pages, flags, process_pages, - NULL, &locked); + &locked); if (locked) mmap_read_unlock(mm); if (pinned_pages <= 0) -- cgit v1.2.3 From ca5e863233e8f6acd1792fd85d6bc2729a1b2c10 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:39 +0100 Subject: mm/gup: remove vmas parameter from get_user_pages_remote() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The only instances of get_user_pages_remote() invocations which used the vmas parameter were for a single page which can instead simply look up the VMA directly. In particular:- - __update_ref_ctr() looked up the VMA but did nothing with it so we simply remove it. - __access_remote_vm() was already using vma_lookup() when the original lookup failed so by doing the lookup directly this also de-duplicates the code. We are able to perform these VMA operations as we already hold the mmap_lock in order to be able to call get_user_pages_remote(). As part of this work we add get_user_page_vma_remote() which abstracts the VMA lookup, error handling and decrementing the page reference count should the VMA lookup fail. This forms part of a broader set of patches intended to eliminate the vmas parameter altogether. 
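A minimal sketch of the new single-page helper (editor's illustration based on the arm64 MTE conversion below; the example function name is made up, the VM_MTE check is just the flag test that conversion happens to need, and the caller holds mmap_lock for the whole sequence so the returned vma stays valid):

static int example_probe_remote_page(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page = get_user_page_vma_remote(mm, addr, FOLL_WRITE, &vma);

	if (IS_ERR_OR_NULL(page))
		return page ? PTR_ERR(page) : -EIO;

	/* vma may only be used while mmap_lock is still held */
	if (!(vma->vm_flags & VM_MTE)) {
		put_page(page);
		return -EOPNOTSUPP;
	}

	put_page(page);
	return 0;
}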
[akpm@linux-foundation.org: avoid passing NULL to PTR_ERR] Link: https://lkml.kernel.org/r/d20128c849ecdbf4dd01cc828fcec32127ed939a.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Reviewed-by: Catalin Marinas (for arm64) Acked-by: David Hildenbrand Reviewed-by: Janosch Frank (for s390) Reviewed-by: Christoph Hellwig Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- arch/arm64/kernel/mte.c | 17 +++++++++-------- arch/s390/kvm/interrupt.c | 2 +- fs/exec.c | 2 +- include/linux/mm.h | 34 +++++++++++++++++++++++++++++++--- kernel/events/uprobes.c | 13 +++++-------- mm/gup.c | 12 ++++-------- mm/memory.c | 20 ++++++++++---------- mm/rmap.c | 2 +- security/tomoyo/domain.c | 2 +- virt/kvm/async_pf.c | 3 +-- 10 files changed, 64 insertions(+), 43 deletions(-) (limited to 'mm') diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c index 7e89968bd282..4c5ef9b20065 100644 --- a/arch/arm64/kernel/mte.c +++ b/arch/arm64/kernel/mte.c @@ -416,10 +416,9 @@ long get_mte_ctrl(struct task_struct *task) static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, struct iovec *kiov, unsigned int gup_flags) { - struct vm_area_struct *vma; void __user *buf = kiov->iov_base; size_t len = kiov->iov_len; - int ret; + int err = 0; int write = gup_flags & FOLL_WRITE; if (!access_ok(buf, len)) @@ -429,14 +428,16 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, return -EIO; while (len) { + struct vm_area_struct *vma; unsigned long tags, offset; void *maddr; - struct page *page = NULL; + struct page *page = get_user_page_vma_remote(mm, addr, + gup_flags, &vma); - ret = get_user_pages_remote(mm, addr, 1, gup_flags, &page, - &vma, NULL); - if (ret <= 0) + if (IS_ERR_OR_NULL(page)) { + err = page == NULL ? -EIO : PTR_ERR(page); break; + } /* * Only copy tags if the page has been mapped as PROT_MTE @@ -446,7 +447,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, * was never mapped with PROT_MTE. 
*/ if (!(vma->vm_flags & VM_MTE)) { - ret = -EOPNOTSUPP; + err = -EOPNOTSUPP; put_page(page); break; } @@ -479,7 +480,7 @@ static int __access_remote_tags(struct mm_struct *mm, unsigned long addr, kiov->iov_len = buf - kiov->iov_base; if (!kiov->iov_len) { /* check for error accessing the tracee's address space */ - if (ret <= 0) + if (err) return -EIO; else return -EFAULT; diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c index da6dac36e959..9bd0a873f3b1 100644 --- a/arch/s390/kvm/interrupt.c +++ b/arch/s390/kvm/interrupt.c @@ -2777,7 +2777,7 @@ static struct page *get_map_page(struct kvm *kvm, u64 uaddr) mmap_read_lock(kvm->mm); get_user_pages_remote(kvm->mm, uaddr, 1, FOLL_WRITE, - &page, NULL, NULL); + &page, NULL); mmap_read_unlock(kvm->mm); return page; } diff --git a/fs/exec.c b/fs/exec.c index a466e797c8e2..25c65b64544b 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -220,7 +220,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos, */ mmap_read_lock(bprm->mm); ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags, - &page, NULL, NULL); + &page, NULL); mmap_read_unlock(bprm->mm); if (ret <= 0) return NULL; diff --git a/include/linux/mm.h b/include/linux/mm.h index cf17ffdf4fbf..fcbfb961b49f 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2353,6 +2353,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping, unmap_mapping_range(mapping, holebegin, holelen, 0); } +static inline struct vm_area_struct *vma_lookup(struct mm_struct *mm, + unsigned long addr); + extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, unsigned int gup_flags); extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, @@ -2361,13 +2364,38 @@ extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags); long get_user_pages_remote(struct mm_struct *mm, - unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked); + unsigned long start, unsigned long nr_pages, + unsigned int gup_flags, struct page **pages, + int *locked); long pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked); + +static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, + unsigned long addr, + int gup_flags, + struct vm_area_struct **vmap) +{ + struct page *page; + struct vm_area_struct *vma; + int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL); + + if (got < 0) + return ERR_PTR(got); + if (got == 0) + return NULL; + + vma = vma_lookup(mm, addr); + if (WARN_ON_ONCE(!vma)) { + put_page(page); + return ERR_PTR(-EINVAL); + } + + *vmap = vma; + return page; +} + long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 59887c69d54c..607d742caa61 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -365,7 +365,6 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) { void *kaddr; struct page *page; - struct vm_area_struct *vma; int ret; short *ptr; @@ -373,7 +372,7 @@ __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) return -EINVAL; ret = get_user_pages_remote(mm, vaddr, 1, - FOLL_WRITE, &page, &vma, NULL); + FOLL_WRITE, &page, 
NULL); if (unlikely(ret <= 0)) { /* * We are asking for 1 page. If get_user_pages_remote() fails, @@ -474,10 +473,9 @@ retry: if (is_register) gup_flags |= FOLL_SPLIT_PMD; /* Read the page with vaddr into memory */ - ret = get_user_pages_remote(mm, vaddr, 1, gup_flags, - &old_page, &vma, NULL); - if (ret <= 0) - return ret; + old_page = get_user_page_vma_remote(mm, vaddr, gup_flags, &vma); + if (IS_ERR_OR_NULL(old_page)) + return old_page ? PTR_ERR(old_page) : 0; ret = verify_opcode(old_page, vaddr, &opcode); if (ret <= 0) @@ -2027,8 +2025,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr) * but we treat this as a 'remote' access since it is * essentially a kernel access to the memory. */ - result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, - NULL, NULL); + result = get_user_pages_remote(mm, vaddr, 1, FOLL_FORCE, &page, NULL); if (result < 0) return result; diff --git a/mm/gup.c b/mm/gup.c index edf0fe2695b0..764bf0c20827 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2165,8 +2165,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * @locked: pointer to lock flag indicating whether lock is held and * subsequently whether VM_FAULT_RETRY functionality can be * utilised. Lock must initially be held. @@ -2181,8 +2179,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, * * The caller is responsible for releasing returned @pages, via put_page(). * - * @vmas are valid only as long as mmap_lock is held. - * * Must be called with mmap_lock held for read or write. * * get_user_pages_remote walks a process's page tables and takes a reference @@ -2219,15 +2215,15 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { int local_locked = 1; - if (!is_valid_gup_args(pages, vmas, locked, &gup_flags, + if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; - return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, NULL, locked ? 
locked : &local_locked, gup_flags); } @@ -2237,7 +2233,7 @@ EXPORT_SYMBOL(get_user_pages_remote); long get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { return 0; } diff --git a/mm/memory.c b/mm/memory.c index f69fbc251198..4dd09f930c61 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -5587,7 +5587,6 @@ EXPORT_SYMBOL_GPL(generic_access_phys); int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, int len, unsigned int gup_flags) { - struct vm_area_struct *vma; void *old_buf = buf; int write = gup_flags & FOLL_WRITE; @@ -5596,29 +5595,30 @@ int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf, /* ignore errors, just check how much was successfully transferred */ while (len) { - int bytes, ret, offset; + int bytes, offset; void *maddr; - struct page *page = NULL; + struct vm_area_struct *vma = NULL; + struct page *page = get_user_page_vma_remote(mm, addr, + gup_flags, &vma); - ret = get_user_pages_remote(mm, addr, 1, - gup_flags, &page, &vma, NULL); - if (ret <= 0) { + if (IS_ERR_OR_NULL(page)) { #ifndef CONFIG_HAVE_IOREMAP_PROT break; #else + int res = 0; + /* * Check if this is a VM_IO | VM_PFNMAP VMA, which * we can access using slightly different code. */ - vma = vma_lookup(mm, addr); if (!vma) break; if (vma->vm_ops && vma->vm_ops->access) - ret = vma->vm_ops->access(vma, addr, buf, + res = vma->vm_ops->access(vma, addr, buf, len, write); - if (ret <= 0) + if (res <= 0) break; - bytes = ret; + bytes = res; #endif } else { bytes = len; diff --git a/mm/rmap.c b/mm/rmap.c index 19392e090bec..cd918cb9a431 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -2328,7 +2328,7 @@ int make_device_exclusive_range(struct mm_struct *mm, unsigned long start, npages = get_user_pages_remote(mm, start, npages, FOLL_GET | FOLL_WRITE | FOLL_SPLIT_PMD, - pages, NULL, NULL); + pages, NULL); if (npages < 0) return npages; diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 31af29f669d2..ac20c0bdff9d 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -916,7 +916,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, */ mmap_read_lock(bprm->mm); ret = get_user_pages_remote(bprm->mm, pos, 1, - FOLL_FORCE, &page, NULL, NULL); + FOLL_FORCE, &page, NULL); mmap_read_unlock(bprm->mm); if (ret <= 0) return false; diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c index 9bfe1d6f6529..e033c79d528e 100644 --- a/virt/kvm/async_pf.c +++ b/virt/kvm/async_pf.c @@ -61,8 +61,7 @@ static void async_pf_execute(struct work_struct *work) * access remotely. */ mmap_read_lock(mm); - get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, NULL, - &locked); + get_user_pages_remote(mm, addr, 1, FOLL_WRITE, NULL, &locked); if (locked) mmap_read_unlock(mm); -- cgit v1.2.3 From 4c630f307455c06f99bdeca7f7a1ab5318604fe0 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:45 +0100 Subject: mm/gup: remove vmas parameter from pin_user_pages() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are now in a position where no caller of pin_user_pages() requires the vmas parameter at all, so eliminate this parameter from the function and all callers. This clears the way to removing the vmas parameter from GUP altogether. 
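For reference, the resulting call shape at a typical site looks like the sketch below. The wrapper function is hypothetical; the flags and locking simply mirror the io_uring and vDPA call sites updated in this patch.

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper pinning a user buffer of the current task for long-term use. */
static long example_pin_buffer(unsigned long uaddr, unsigned long nr_pages,
			       struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	/* Previously: pin_user_pages(uaddr, nr_pages, flags, pages, NULL); */
	pinned = pin_user_pages(uaddr, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages);
	mmap_read_unlock(current->mm);

	/* Treat a short pin as failure, as several call sites in this patch do. */
	if (pinned > 0 && pinned != nr_pages) {
		unpin_user_pages(pages, pinned);
		return -EFAULT;
	}
	return pinned;
}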
Link: https://lkml.kernel.org/r/195a99ae949c9f5cb589d2222b736ced96ec199a.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Acked-by: Dennis Dalessandro [qib] Reviewed-by: Christoph Hellwig Acked-by: Sakari Ailus [drivers/media] Cc: Catalin Marinas Cc: Christian König Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sean Christopherson Signed-off-by: Andrew Morton --- arch/powerpc/mm/book3s64/iommu_api.c | 2 +- drivers/infiniband/hw/qib/qib_user_pages.c | 2 +- drivers/infiniband/hw/usnic/usnic_uiom.c | 2 +- drivers/infiniband/sw/siw/siw_mem.c | 2 +- drivers/media/v4l2-core/videobuf-dma-sg.c | 2 +- drivers/vdpa/vdpa_user/vduse_dev.c | 2 +- drivers/vhost/vdpa.c | 2 +- include/linux/mm.h | 3 +-- io_uring/rsrc.c | 2 +- mm/gup.c | 9 +++------ mm/gup_test.c | 9 ++++----- net/xdp/xdp_umem.c | 2 +- 12 files changed, 17 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/arch/powerpc/mm/book3s64/iommu_api.c b/arch/powerpc/mm/book3s64/iommu_api.c index 81d7185e2ae8..d19fb1f3007d 100644 --- a/arch/powerpc/mm/book3s64/iommu_api.c +++ b/arch/powerpc/mm/book3s64/iommu_api.c @@ -105,7 +105,7 @@ static long mm_iommu_do_alloc(struct mm_struct *mm, unsigned long ua, ret = pin_user_pages(ua + (entry << PAGE_SHIFT), n, FOLL_WRITE | FOLL_LONGTERM, - mem->hpages + entry, NULL); + mem->hpages + entry); if (ret == n) { pinned += n; continue; diff --git a/drivers/infiniband/hw/qib/qib_user_pages.c b/drivers/infiniband/hw/qib/qib_user_pages.c index f693bc753b6b..1bb7507325bc 100644 --- a/drivers/infiniband/hw/qib/qib_user_pages.c +++ b/drivers/infiniband/hw/qib/qib_user_pages.c @@ -111,7 +111,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages, ret = pin_user_pages(start_page + got * PAGE_SIZE, num_pages - got, FOLL_LONGTERM | FOLL_WRITE, - p + got, NULL); + p + got); if (ret < 0) { mmap_read_unlock(current->mm); goto bail_release; diff --git a/drivers/infiniband/hw/usnic/usnic_uiom.c b/drivers/infiniband/hw/usnic/usnic_uiom.c index 2a5cac2658ec..84e0f41e7dfa 100644 --- a/drivers/infiniband/hw/usnic/usnic_uiom.c +++ b/drivers/infiniband/hw/usnic/usnic_uiom.c @@ -140,7 +140,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable, ret = pin_user_pages(cur_base, min_t(unsigned long, npages, PAGE_SIZE / sizeof(struct page *)), - gup_flags, page_list, NULL); + gup_flags, page_list); if (ret < 0) goto out; diff --git a/drivers/infiniband/sw/siw/siw_mem.c b/drivers/infiniband/sw/siw/siw_mem.c index f51ab2ccf151..e6e25f15567d 100644 --- a/drivers/infiniband/sw/siw/siw_mem.c +++ b/drivers/infiniband/sw/siw/siw_mem.c @@ -422,7 +422,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable) umem->page_chunk[i].plist = plist; while (nents) { rv = pin_user_pages(first_page_va, nents, foll_flags, - plist, NULL); + plist); if (rv < 0) goto out_sem_up; diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c index 53001532e8e3..405b89ea1054 100644 --- a/drivers/media/v4l2-core/videobuf-dma-sg.c +++ b/drivers/media/v4l2-core/videobuf-dma-sg.c @@ -180,7 +180,7 @@ static int videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, data, size, dma->nr_pages); err = pin_user_pages(data & PAGE_MASK, dma->nr_pages, gup_flags, - dma->pages, NULL); + dma->pages); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? 
err : 0; diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index de97e38c3b82..4d4405f058e8 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1052,7 +1052,7 @@ static int vduse_dev_reg_umem(struct vduse_dev *dev, goto out; pinned = pin_user_pages(uaddr, npages, FOLL_LONGTERM | FOLL_WRITE, - page_list, NULL); + page_list); if (pinned != npages) { ret = pinned < 0 ? pinned : -ENOMEM; goto out; diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 8c1aefc865f0..61223fcbe82b 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -983,7 +983,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v, while (npages) { sz2pin = min_t(unsigned long, npages, list_size); pinned = pin_user_pages(cur_base, sz2pin, - gup_flags, page_list, NULL); + gup_flags, page_list); if (sz2pin != pinned) { if (pinned < 0) { ret = pinned; diff --git a/include/linux/mm.h b/include/linux/mm.h index fcbfb961b49f..280429ffa91d 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2399,8 +2399,7 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm, long get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages); long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas); + unsigned int gup_flags, struct page **pages); long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags); long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index b6451f8bc5d5..b56bda46a9eb 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -1044,7 +1044,7 @@ struct page **io_pin_pages(unsigned long ubuf, unsigned long len, int *npages) ret = 0; mmap_read_lock(current->mm); pret = pin_user_pages(ubuf, nr_pages, FOLL_WRITE | FOLL_LONGTERM, - pages, NULL); + pages); if (pret == nr_pages) *npages = nr_pages; else diff --git a/mm/gup.c b/mm/gup.c index 764bf0c20827..18e3bc2ee3f1 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3131,8 +3131,6 @@ EXPORT_SYMBOL(pin_user_pages_remote); * @gup_flags: flags modifying lookup behaviour * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * * Nearly the same as get_user_pages(), except that FOLL_TOUCH is not set, and * FOLL_PIN is set. @@ -3141,15 +3139,14 @@ EXPORT_SYMBOL(pin_user_pages_remote); * see Documentation/core-api/pin_user_pages.rst for details. 
*/ long pin_user_pages(unsigned long start, unsigned long nr_pages, - unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas) + unsigned int gup_flags, struct page **pages) { int locked = 1; - if (!is_valid_gup_args(pages, vmas, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, - pages, vmas, &locked, gup_flags); + pages, NULL, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); diff --git a/mm/gup_test.c b/mm/gup_test.c index 9ba8ea23f84e..1668ce0e0783 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -146,18 +146,17 @@ static int __gup_test_ioctl(unsigned int cmd, pages + i); break; case PIN_BASIC_TEST: - nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i, - NULL); + nr = pin_user_pages(addr, nr, gup->gup_flags, pages + i); break; case PIN_LONGTERM_BENCHMARK: nr = pin_user_pages(addr, nr, gup->gup_flags | FOLL_LONGTERM, - pages + i, NULL); + pages + i); break; case DUMP_USER_PAGES_TEST: if (gup->test_flags & GUP_TEST_FLAG_DUMP_PAGES_USE_PIN) nr = pin_user_pages(addr, nr, gup->gup_flags, - pages + i, NULL); + pages + i); else nr = get_user_pages(addr, nr, gup->gup_flags, pages + i); @@ -270,7 +269,7 @@ static inline int pin_longterm_test_start(unsigned long arg) gup_flags, pages); else cur_pages = pin_user_pages(addr, remaining_pages, - gup_flags, pages, NULL); + gup_flags, pages); if (cur_pages < 0) { pin_longterm_test_stop(); ret = cur_pages; diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 02207e852d79..06cead2b8e34 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -103,7 +103,7 @@ static int xdp_umem_pin_pages(struct xdp_umem *umem, unsigned long address) mmap_read_lock(current->mm); npgs = pin_user_pages(address, umem->npgs, - gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL); + gup_flags | FOLL_LONGTERM, &umem->pgs[0]); mmap_read_unlock(current->mm); if (npgs != umem->npgs) { -- cgit v1.2.3 From b2cac248191b7466c5819e0da617b0705a26e197 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Wed, 17 May 2023 20:25:48 +0100 Subject: mm/gup: remove vmas array from internal GUP functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now we have eliminated all callers to GUP APIs which use the vmas parameter, eliminate it altogether. This eliminates a class of bugs where vmas might have been kept around longer than the mmap_lock and thus we need not be concerned about locks being dropped during this operation leaving behind dangling pointers. This simplifies the GUP API and makes it considerably clearer as to its purpose - follow flags are applied and if pinning, an array of pages is returned. 
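The dangling-pointer pattern being closed off looked roughly like the sketch below; the caller is hypothetical and uses the old get_user_pages_remote() signature purely for illustration.

#include <linux/mm.h>
#include <linux/printk.h>

/* Hypothetical pre-series caller showing the hazard: vmas outliving mmap_lock. */
static void example_dangling_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	struct page *page;

	mmap_read_lock(mm);
	/* Old API: the returned vma is only valid while mmap_lock is held. */
	if (get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page, &vma, NULL) <= 0) {
		mmap_read_unlock(mm);
		return;
	}
	mmap_read_unlock(mm);

	/* Use-after-unlock: vma may now point at a freed or replaced VMA. */
	pr_info("vm_flags=%lx\n", vma->vm_flags);
	put_page(page);
}

With the vmas parameter gone, a caller that needs the VMA must look it up (or use get_user_page_vma_remote()) while the lock is still held, so this mistake is no longer expressible through the GUP API.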
Link: https://lkml.kernel.org/r/6811b4b2b4b3baf3dd07f422bb18853bb2cd09fb.1684350871.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: David Hildenbrand Reviewed-by: Christoph Hellwig Cc: Catalin Marinas Cc: Christian König Cc: Dennis Dalessandro Cc: Greg Kroah-Hartman Cc: Janosch Frank Cc: Jarkko Sakkinen Cc: Jason Gunthorpe Cc: Jens Axboe Cc: Matthew Wilcox (Oracle) Cc: Sakari Ailus Cc: Sean Christopherson Signed-off-by: Andrew Morton --- include/linux/hugetlb.h | 10 +++--- mm/gup.c | 83 ++++++++++++++++++------------------------------- mm/hugetlb.c | 24 ++++++-------- 3 files changed, 45 insertions(+), 72 deletions(-) (limited to 'mm') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index f1543a0568ff..21f942025fec 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -133,9 +133,8 @@ int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, unsigned long address, unsigned int flags); long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *, - struct page **, struct vm_area_struct **, - unsigned long *, unsigned long *, long, unsigned int, - int *); + struct page **, unsigned long *, unsigned long *, + long, unsigned int, int *); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *, zap_flags_t); @@ -306,9 +305,8 @@ static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, static inline long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page **pages, - struct vm_area_struct **vmas, unsigned long *position, - unsigned long *nr_pages, long i, unsigned int flags, - int *nonblocking) + unsigned long *position, unsigned long *nr_pages, + long i, unsigned int flags, int *nonblocking) { BUG(); return 0; diff --git a/mm/gup.c b/mm/gup.c index 18e3bc2ee3f1..8db58305f4eb 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -1024,8 +1024,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. Or NULL, if caller * only intends to ensure the pages are faulted in. - * @vmas: array of pointers to vmas corresponding to each page. - * Or NULL if the caller does not require them. * @locked: whether we're still with the mmap_lock held * * Returns either number of pages pinned (which may be less than the @@ -1039,8 +1037,6 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) * * The caller is responsible for releasing returned @pages, via put_page(). * - * @vmas are valid only as long as mmap_lock is held. - * * Must be called with mmap_lock held. It may be released. See below. 
* * __get_user_pages walks a process's page tables and takes a reference to @@ -1076,7 +1072,7 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) static long __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, - struct vm_area_struct **vmas, int *locked) + int *locked) { long ret = 0, i = 0; struct vm_area_struct *vma = NULL; @@ -1116,9 +1112,9 @@ static long __get_user_pages(struct mm_struct *mm, goto out; if (is_vm_hugetlb_page(vma)) { - i = follow_hugetlb_page(mm, vma, pages, vmas, - &start, &nr_pages, i, - gup_flags, locked); + i = follow_hugetlb_page(mm, vma, pages, + &start, &nr_pages, i, + gup_flags, locked); if (!*locked) { /* * We've got a VM_FAULT_RETRY @@ -1183,10 +1179,6 @@ retry: ctx.page_mask = 0; } next_page: - if (vmas) { - vmas[i] = vma; - ctx.page_mask = 0; - } page_increm = 1 + (~(start >> PAGE_SHIFT) & ctx.page_mask); if (page_increm > nr_pages) page_increm = nr_pages; @@ -1341,7 +1333,6 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, unsigned int flags) { @@ -1379,7 +1370,7 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, pages_done = 0; for (;;) { ret = __get_user_pages(mm, start, nr_pages, flags, pages, - vmas, locked); + locked); if (!(flags & FOLL_UNLOCKABLE)) { /* VM_FAULT_RETRY couldn't trigger, bypass */ pages_done = ret; @@ -1443,7 +1434,7 @@ retry: *locked = 1; ret = __get_user_pages(mm, start, 1, flags | FOLL_TRIED, - pages, NULL, locked); + pages, locked); if (!*locked) { /* Continue to retry until we succeeded */ BUG_ON(ret != 0); @@ -1541,7 +1532,7 @@ long populate_vma_page_range(struct vm_area_struct *vma, * not result in a stack expansion that recurses back here. */ ret = __get_user_pages(mm, start, nr_pages, gup_flags, - NULL, NULL, locked ? locked : &local_locked); + NULL, locked ? locked : &local_locked); lru_add_drain(); return ret; } @@ -1599,7 +1590,7 @@ long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, return -EINVAL; ret = __get_user_pages(mm, start, nr_pages, gup_flags, - NULL, NULL, locked); + NULL, locked); lru_add_drain(); return ret; } @@ -1667,8 +1658,7 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors) #else /* CONFIG_MMU */ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, - unsigned int foll_flags) + int *locked, unsigned int foll_flags) { struct vm_area_struct *vma; bool must_unlock = false; @@ -1712,8 +1702,7 @@ static long __get_user_pages_locked(struct mm_struct *mm, unsigned long start, if (pages[i]) get_page(pages[i]); } - if (vmas) - vmas[i] = vma; + start = (start + PAGE_SIZE) & PAGE_MASK; } @@ -1894,8 +1883,7 @@ struct page *get_dump_page(unsigned long addr) int locked = 0; int ret; - ret = __get_user_pages_locked(current->mm, addr, 1, &page, NULL, - &locked, + ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked, FOLL_FORCE | FOLL_DUMP | FOLL_GET); return (ret == 1) ? 
page : NULL; } @@ -2068,7 +2056,6 @@ static long __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, - struct vm_area_struct **vmas, int *locked, unsigned int gup_flags) { @@ -2076,13 +2063,13 @@ static long __gup_longterm_locked(struct mm_struct *mm, long rc, nr_pinned_pages; if (!(gup_flags & FOLL_LONGTERM)) - return __get_user_pages_locked(mm, start, nr_pages, pages, vmas, + return __get_user_pages_locked(mm, start, nr_pages, pages, locked, gup_flags); flags = memalloc_pin_save(); do { nr_pinned_pages = __get_user_pages_locked(mm, start, nr_pages, - pages, vmas, locked, + pages, locked, gup_flags); if (nr_pinned_pages <= 0) { rc = nr_pinned_pages; @@ -2100,9 +2087,8 @@ static long __gup_longterm_locked(struct mm_struct *mm, * Check that the given flags are valid for the exported gup/pup interface, and * update them with the required flags that the caller must have set. */ -static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, - int *locked, unsigned int *gup_flags_p, - unsigned int to_set) +static bool is_valid_gup_args(struct page **pages, int *locked, + unsigned int *gup_flags_p, unsigned int to_set) { unsigned int gup_flags = *gup_flags_p; @@ -2144,13 +2130,6 @@ static bool is_valid_gup_args(struct page **pages, struct vm_area_struct **vmas, (gup_flags & FOLL_PCI_P2PDMA))) return false; - /* - * Can't use VMAs with locked, as locked allows GUP to unlock - * which invalidates the vmas array - */ - if (WARN_ON_ONCE(vmas && (gup_flags & FOLL_UNLOCKABLE))) - return false; - *gup_flags_p = gup_flags; return true; } @@ -2219,11 +2198,11 @@ long get_user_pages_remote(struct mm_struct *mm, { int local_locked = 1; - if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, + if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_TOUCH | FOLL_REMOTE)) return -EINVAL; - return __get_user_pages_locked(mm, start, nr_pages, pages, NULL, + return __get_user_pages_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } @@ -2258,11 +2237,11 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, { int locked = 1; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_TOUCH)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - NULL, &locked, gup_flags); + &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages); @@ -2286,12 +2265,12 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, { int locked = 0; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_TOUCH | FOLL_UNLOCKABLE)) return -EINVAL; return __get_user_pages_locked(current->mm, start, nr_pages, pages, - NULL, &locked, gup_flags); + &locked, gup_flags); } EXPORT_SYMBOL(get_user_pages_unlocked); @@ -2981,7 +2960,7 @@ static int internal_get_user_pages_fast(unsigned long start, start += nr_pinned << PAGE_SHIFT; pages += nr_pinned; ret = __gup_longterm_locked(current->mm, start, nr_pages - nr_pinned, - pages, NULL, &locked, + pages, &locked, gup_flags | FOLL_TOUCH | FOLL_UNLOCKABLE); if (ret < 0) { /* @@ -3023,7 +3002,7 @@ int get_user_pages_fast_only(unsigned long start, int nr_pages, * FOLL_FAST_ONLY is required in order to match the API description of * this routine: no fall back to regular ("slow") GUP. 
*/ - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET | FOLL_FAST_ONLY)) return -EINVAL; @@ -3056,7 +3035,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, * FOLL_GET, because gup fast is always a "pin with a +1 page refcount" * request. */ - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_GET)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_GET)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } @@ -3081,7 +3060,7 @@ EXPORT_SYMBOL_GPL(get_user_pages_fast); int pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages) { - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return -EINVAL; return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages); } @@ -3114,10 +3093,10 @@ long pin_user_pages_remote(struct mm_struct *mm, { int local_locked = 1; - if (!is_valid_gup_args(pages, NULL, locked, &gup_flags, + if (!is_valid_gup_args(pages, locked, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_REMOTE)) return 0; - return __gup_longterm_locked(mm, start, nr_pages, pages, NULL, + return __gup_longterm_locked(mm, start, nr_pages, pages, locked ? locked : &local_locked, gup_flags); } @@ -3143,10 +3122,10 @@ long pin_user_pages(unsigned long start, unsigned long nr_pages, { int locked = 1; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, FOLL_PIN)) + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN)) return 0; return __gup_longterm_locked(current->mm, start, nr_pages, - pages, NULL, &locked, gup_flags); + pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages); @@ -3160,11 +3139,11 @@ long pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, { int locked = 0; - if (!is_valid_gup_args(pages, NULL, NULL, &gup_flags, + if (!is_valid_gup_args(pages, NULL, &gup_flags, FOLL_PIN | FOLL_TOUCH | FOLL_UNLOCKABLE)) return 0; - return __gup_longterm_locked(current->mm, start, nr_pages, pages, NULL, + return __gup_longterm_locked(current->mm, start, nr_pages, pages, &locked, gup_flags); } EXPORT_SYMBOL(pin_user_pages_unlocked); diff --git a/mm/hugetlb.c b/mm/hugetlb.c index f154019e6b84..ea24718db4af 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6425,17 +6425,14 @@ out_release_nounlock: } #endif /* CONFIG_USERFAULTFD */ -static void record_subpages_vmas(struct page *page, struct vm_area_struct *vma, - int refs, struct page **pages, - struct vm_area_struct **vmas) +static void record_subpages(struct page *page, struct vm_area_struct *vma, + int refs, struct page **pages) { int nr; for (nr = 0; nr < refs; nr++) { if (likely(pages)) pages[nr] = nth_page(page, nr); - if (vmas) - vmas[nr] = vma; } } @@ -6508,9 +6505,9 @@ out_unlock: } long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, - struct page **pages, struct vm_area_struct **vmas, - unsigned long *position, unsigned long *nr_pages, - long i, unsigned int flags, int *locked) + struct page **pages, unsigned long *position, + unsigned long *nr_pages, long i, unsigned int flags, + int *locked) { unsigned long pfn_offset; unsigned long vaddr = *position; @@ -6638,7 +6635,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, * If subpage information not requested, update counters * and skip the same_page loop below. 
*/ - if (!pages && !vmas && !pfn_offset && + if (!pages && !pfn_offset && (vaddr + huge_page_size(h) < vma->vm_end) && (remainder >= pages_per_huge_page(h))) { vaddr += huge_page_size(h); @@ -6653,11 +6650,10 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, refs = min3(pages_per_huge_page(h) - pfn_offset, remainder, (vma->vm_end - ALIGN_DOWN(vaddr, PAGE_SIZE)) >> PAGE_SHIFT); - if (pages || vmas) - record_subpages_vmas(nth_page(page, pfn_offset), - vma, refs, - likely(pages) ? pages + i : NULL, - vmas ? vmas + i : NULL); + if (pages) + record_subpages(nth_page(page, pfn_offset), + vma, refs, + likely(pages) ? pages + i : NULL); if (pages) { /* -- cgit v1.2.3 From 4e096ae1801e24b338e02715c65c3ffa8883ba5d Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Sat, 13 May 2023 01:11:01 +0100 Subject: mm: convert migrate_pages() to work on folios Almost all of the callers & implementors of migrate_pages() were already converted to use folios. compaction_alloc() & compaction_free() are trivial to convert a part of this patch and not worth splitting out. Link: https://lkml.kernel.org/r/20230513001101.276972-1-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Reviewed-by: "Huang, Ying" Signed-off-by: Andrew Morton --- Documentation/mm/page_migration.rst | 7 +- .../translations/zh_CN/mm/page_migration.rst | 2 +- include/linux/migrate.h | 16 +- mm/compaction.c | 15 +- mm/mempolicy.c | 15 +- mm/migrate.c | 161 ++++++++++----------- mm/vmscan.c | 15 +- 7 files changed, 108 insertions(+), 123 deletions(-) (limited to 'mm') diff --git a/Documentation/mm/page_migration.rst b/Documentation/mm/page_migration.rst index 313dce18893e..e35af7805be5 100644 --- a/Documentation/mm/page_migration.rst +++ b/Documentation/mm/page_migration.rst @@ -73,14 +73,13 @@ In kernel use of migrate_pages() It also prevents the swapper or other scans from encountering the page. -2. We need to have a function of type new_page_t that can be +2. We need to have a function of type new_folio_t that can be passed to migrate_pages(). This function should figure out - how to allocate the correct new page given the old page. + how to allocate the correct new folio given the old folio. 3. The migrate_pages() function is called which attempts to do the migration. It will call the function to allocate - the new page for each page that is considered for - moving. + the new folio for each folio that is considered for moving. How migrate_pages() works ========================= diff --git a/Documentation/translations/zh_CN/mm/page_migration.rst b/Documentation/translations/zh_CN/mm/page_migration.rst index 076081dc1635..f95063826a15 100644 --- a/Documentation/translations/zh_CN/mm/page_migration.rst +++ b/Documentation/translations/zh_CN/mm/page_migration.rst @@ -55,7 +55,7 @@ mbind()设置一个新的内存策略。一个进程的页面也可以通过sys_ 消失。它还可以防止交换器或其他扫描器遇到该页。 -2. 我们需要有一个new_page_t类型的函数,可以传递给migrate_pages()。这个函数应该计算 +2. 我们需要有一个new_folio_t类型的函数,可以传递给migrate_pages()。这个函数应该计算 出如何在给定的旧页面中分配正确的新页面。 3. 
migrate_pages()函数被调用,它试图进行迁移。它将调用该函数为每个被考虑迁移的页面分 diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6241a1596a75..6de5756d8533 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -7,8 +7,8 @@ #include #include -typedef struct page *new_page_t(struct page *page, unsigned long private); -typedef void free_page_t(struct page *page, unsigned long private); +typedef struct folio *new_folio_t(struct folio *folio, unsigned long private); +typedef void free_folio_t(struct folio *folio, unsigned long private); struct migration_target_control; @@ -67,10 +67,10 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode, int extra_count); int migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode); -int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, +int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); -struct page *alloc_migration_target(struct page *page, unsigned long private); +struct folio *alloc_migration_target(struct folio *src, unsigned long private); bool isolate_movable_page(struct page *page, isolate_mode_t mode); int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping, #else static inline void putback_movable_pages(struct list_head *l) {} -static inline int migrate_pages(struct list_head *l, new_page_t new, - free_page_t free, unsigned long private, enum migrate_mode mode, - int reason, unsigned int *ret_succeeded) +static inline int migrate_pages(struct list_head *l, new_folio_t new, + free_folio_t free, unsigned long private, + enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { return -ENOSYS; } -static inline struct page *alloc_migration_target(struct page *page, +static inline struct folio *alloc_migration_target(struct folio *src, unsigned long private) { return NULL; } static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode) diff --git a/mm/compaction.c b/mm/compaction.c index f6465ae74d3f..e23e00bec030 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1685,11 +1685,10 @@ splitmap: * This is a migrate-callback that "allocates" freepages by taking pages * from the isolated freelists in the block we are migrating to. */ -static struct page *compaction_alloc(struct page *migratepage, - unsigned long data) +static struct folio *compaction_alloc(struct folio *src, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; - struct page *freepage; + struct folio *dst; if (list_empty(&cc->freepages)) { isolate_freepages(cc); @@ -1698,11 +1697,11 @@ static struct page *compaction_alloc(struct page *migratepage, return NULL; } - freepage = list_entry(cc->freepages.next, struct page, lru); - list_del(&freepage->lru); + dst = list_entry(cc->freepages.next, struct folio, lru); + list_del(&dst->lru); cc->nr_freepages--; - return freepage; + return dst; } /* @@ -1710,11 +1709,11 @@ static struct page *compaction_alloc(struct page *migratepage, * freelist. All pages on the freelist are from the same zone, so there is no * special handling needed for NUMA. 
*/ -static void compaction_free(struct page *page, unsigned long data) +static void compaction_free(struct folio *dst, unsigned long data) { struct compact_control *cc = (struct compact_control *)data; - list_add(&page->lru, &cc->freepages); + list_add(&dst->lru, &cc->freepages); cc->nr_freepages++; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 1756389a0609..f06ca8c18e62 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1195,24 +1195,22 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, * list of pages handed to migrate_pages()--which is how we get here-- * is in virtual address order. */ -static struct page *new_page(struct page *page, unsigned long start) +static struct folio *new_folio(struct folio *src, unsigned long start) { - struct folio *dst, *src = page_folio(page); struct vm_area_struct *vma; unsigned long address; VMA_ITERATOR(vmi, current->mm, start); gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL; for_each_vma(vmi, vma) { - address = page_address_in_vma(page, vma); + address = page_address_in_vma(&src->page, vma); if (address != -EFAULT) break; } if (folio_test_hugetlb(src)) { - dst = alloc_hugetlb_folio_vma(folio_hstate(src), + return alloc_hugetlb_folio_vma(folio_hstate(src), vma, address); - return &dst->page; } if (folio_test_large(src)) @@ -1221,9 +1219,8 @@ static struct page *new_page(struct page *page, unsigned long start) /* * if !vma, vma_alloc_folio() will use task or system default policy */ - dst = vma_alloc_folio(gfp, folio_order(src), vma, address, + return vma_alloc_folio(gfp, folio_order(src), vma, address, folio_test_large(src)); - return &dst->page; } #else @@ -1239,7 +1236,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from, return -ENOSYS; } -static struct page *new_page(struct page *page, unsigned long start) +static struct folio *new_folio(struct folio *src, unsigned long start) { return NULL; } @@ -1334,7 +1331,7 @@ static long do_mbind(unsigned long start, unsigned long len, if (!list_empty(&pagelist)) { WARN_ON_ONCE(flags & MPOL_MF_LAZY); - nr_failed = migrate_pages(&pagelist, new_page, NULL, + nr_failed = migrate_pages(&pagelist, new_folio, NULL, start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL); if (nr_failed) putback_movable_pages(&pagelist); diff --git a/mm/migrate.c b/mm/migrate.c index cb292d2a90ce..30b5ce10935e 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1067,15 +1067,13 @@ static void migrate_folio_undo_src(struct folio *src, } /* Restore the destination folio to the original state upon failure */ -static void migrate_folio_undo_dst(struct folio *dst, - bool locked, - free_page_t put_new_page, - unsigned long private) +static void migrate_folio_undo_dst(struct folio *dst, bool locked, + free_folio_t put_new_folio, unsigned long private) { if (locked) folio_unlock(dst); - if (put_new_page) - put_new_page(&dst->page, private); + if (put_new_folio) + put_new_folio(dst, private); else folio_put(dst); } @@ -1099,14 +1097,13 @@ static void migrate_folio_done(struct folio *src, } /* Obtain the lock on page, remove all ptes. 
*/ -static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page, - unsigned long private, struct folio *src, - struct folio **dstp, enum migrate_mode mode, - enum migrate_reason reason, struct list_head *ret) +static int migrate_folio_unmap(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, struct folio **dstp, enum migrate_mode mode, + enum migrate_reason reason, struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; - struct page *newpage = NULL; int page_was_mapped = 0; struct anon_vma *anon_vma = NULL; bool is_lru = !__PageMovable(&src->page); @@ -1123,10 +1120,9 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page return MIGRATEPAGE_SUCCESS; } - newpage = get_new_page(&src->page, private); - if (!newpage) + dst = get_new_folio(src, private); + if (!dst) return -ENOMEM; - dst = page_folio(newpage); *dstp = dst; dst->private = NULL; @@ -1254,13 +1250,13 @@ out: ret = NULL; migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); - migrate_folio_undo_dst(dst, dst_locked, put_new_page, private); + migrate_folio_undo_dst(dst, dst_locked, put_new_folio, private); return rc; } /* Migrate the folio to the newly allocated folio in dst. */ -static int migrate_folio_move(free_page_t put_new_page, unsigned long private, +static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private, struct folio *src, struct folio *dst, enum migrate_mode mode, enum migrate_reason reason, struct list_head *ret) @@ -1332,7 +1328,7 @@ out: } migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret); - migrate_folio_undo_dst(dst, true, put_new_page, private); + migrate_folio_undo_dst(dst, true, put_new_folio, private); return rc; } @@ -1355,16 +1351,14 @@ out: * because then pte is replaced with migration swap entry and direct I/O code * will wait in the page fault for migration to complete. */ -static int unmap_and_move_huge_page(new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - struct page *hpage, int force, - enum migrate_mode mode, int reason, - struct list_head *ret) +static int unmap_and_move_huge_page(new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + struct folio *src, int force, enum migrate_mode mode, + int reason, struct list_head *ret) { - struct folio *dst, *src = page_folio(hpage); + struct folio *dst; int rc = -EAGAIN; int page_was_mapped = 0; - struct page *new_hpage; struct anon_vma *anon_vma = NULL; struct address_space *mapping = NULL; @@ -1374,10 +1368,9 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, return MIGRATEPAGE_SUCCESS; } - new_hpage = get_new_page(hpage, private); - if (!new_hpage) + dst = get_new_folio(src, private); + if (!dst) return -ENOMEM; - dst = page_folio(new_hpage); if (!folio_trylock(src)) { if (!force) @@ -1418,7 +1411,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page, * semaphore in write mode here and set TTU_RMAP_LOCKED * to let lower levels know we have taken the lock. */ - mapping = hugetlb_page_mapping_lock_write(hpage); + mapping = hugetlb_page_mapping_lock_write(&src->page); if (unlikely(!mapping)) goto unlock_put_anon; @@ -1448,7 +1441,7 @@ put_anon: if (rc == MIGRATEPAGE_SUCCESS) { move_hugetlb_state(src, dst, reason); - put_new_page = NULL; + put_new_folio = NULL; } out_unlock: @@ -1464,8 +1457,8 @@ out: * it. Otherwise, put_page() will drop the reference grabbed during * isolation. 
*/ - if (put_new_page) - put_new_page(new_hpage, private); + if (put_new_folio) + put_new_folio(dst, private); else folio_putback_active_hugetlb(dst); @@ -1512,8 +1505,8 @@ struct migrate_pages_stats { * exist any more. It is caller's responsibility to call putback_movable_pages() * only if ret != 0. */ -static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, +static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, struct migrate_pages_stats *stats, struct list_head *ret_folios) @@ -1551,9 +1544,9 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, continue; } - rc = unmap_and_move_huge_page(get_new_page, - put_new_page, private, - &folio->page, pass > 2, mode, + rc = unmap_and_move_huge_page(get_new_folio, + put_new_folio, private, + folio, pass > 2, mode, reason, ret_folios); /* * The rules are: @@ -1610,11 +1603,11 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the * length of the from list must be <= 1. */ -static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - enum migrate_mode mode, int reason, struct list_head *ret_folios, - struct list_head *split_folios, struct migrate_pages_stats *stats, - int nr_pass) +static int migrate_pages_batch(struct list_head *from, + new_folio_t get_new_folio, free_folio_t put_new_folio, + unsigned long private, enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats, int nr_pass) { int retry = 1; int thp_retry = 1; @@ -1664,8 +1657,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, continue; } - rc = migrate_folio_unmap(get_new_page, put_new_page, private, - folio, &dst, mode, reason, ret_folios); + rc = migrate_folio_unmap(get_new_folio, put_new_folio, + private, folio, &dst, mode, reason, + ret_folios); /* * The rules are: * Success: folio will be freed @@ -1762,7 +1756,7 @@ move: cond_resched(); - rc = migrate_folio_move(put_new_page, private, + rc = migrate_folio_move(put_new_folio, private, folio, dst, mode, reason, ret_folios); /* @@ -1808,7 +1802,7 @@ out: migrate_folio_undo_src(folio, page_was_mapped, anon_vma, true, ret_folios); list_del(&dst->lru); - migrate_folio_undo_dst(dst, true, put_new_page, private); + migrate_folio_undo_dst(dst, true, put_new_folio, private); dst = dst2; dst2 = list_next_entry(dst, lru); } @@ -1816,10 +1810,11 @@ out: return rc; } -static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, - enum migrate_mode mode, int reason, struct list_head *ret_folios, - struct list_head *split_folios, struct migrate_pages_stats *stats) +static int migrate_pages_sync(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, + enum migrate_mode mode, int reason, + struct list_head *ret_folios, struct list_head *split_folios, + struct migrate_pages_stats *stats) { int rc, nr_failed = 0; LIST_HEAD(folios); @@ -1827,7 +1822,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, memset(&astats, 0, sizeof(astats)); /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */ - rc = 
migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC, + rc = migrate_pages_batch(from, get_new_folio, put_new_folio, private, MIGRATE_ASYNC, reason, &folios, split_folios, &astats, NR_MAX_MIGRATE_ASYNC_RETRY); stats->nr_succeeded += astats.nr_succeeded; @@ -1849,7 +1844,7 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, list_splice_tail_init(&folios, from); while (!list_empty(from)) { list_move(from->next, &folios); - rc = migrate_pages_batch(&folios, get_new_page, put_new_page, + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, private, mode, reason, ret_folios, split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY); list_splice_tail_init(&folios, ret_folios); @@ -1866,11 +1861,11 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, * supplied as the target for the page migration * * @from: The list of folios to be migrated. - * @get_new_page: The function used to allocate free folios to be used + * @get_new_folio: The function used to allocate free folios to be used * as the target of the folio migration. - * @put_new_page: The function used to free target folios if migration + * @put_new_folio: The function used to free target folios if migration * fails, or NULL if no special handling is necessary. - * @private: Private data to be passed on to get_new_page() + * @private: Private data to be passed on to get_new_folio() * @mode: The migration mode that specifies the constraints for * folio migration, if any. * @reason: The reason for folio migration. @@ -1887,8 +1882,8 @@ static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, * considered as the number of non-migrated large folio, no matter how many * split folios of the large folio are migrated successfully. */ -int migrate_pages(struct list_head *from, new_page_t get_new_page, - free_page_t put_new_page, unsigned long private, +int migrate_pages(struct list_head *from, new_folio_t get_new_folio, + free_folio_t put_new_folio, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded) { int rc, rc_gather; @@ -1903,7 +1898,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, memset(&stats, 0, sizeof(stats)); - rc_gather = migrate_hugetlbs(from, get_new_page, put_new_page, private, + rc_gather = migrate_hugetlbs(from, get_new_folio, put_new_folio, private, mode, reason, &stats, &ret_folios); if (rc_gather < 0) goto out; @@ -1926,12 +1921,14 @@ again: else list_splice_init(from, &folios); if (mode == MIGRATE_ASYNC) - rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private, - mode, reason, &ret_folios, &split_folios, &stats, - NR_MAX_MIGRATE_PAGES_RETRY); + rc = migrate_pages_batch(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats, + NR_MAX_MIGRATE_PAGES_RETRY); else - rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private, - mode, reason, &ret_folios, &split_folios, &stats); + rc = migrate_pages_sync(&folios, get_new_folio, put_new_folio, + private, mode, reason, &ret_folios, + &split_folios, &stats); list_splice_tail_init(&folios, &ret_folios); if (rc < 0) { rc_gather = rc; @@ -1944,8 +1941,9 @@ again: * is counted as 1 failure already. And, we only try to migrate * with minimal effort, force MIGRATE_ASYNC mode and retry once. 
*/ - migrate_pages_batch(&split_folios, get_new_page, put_new_page, private, - MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1); + migrate_pages_batch(&split_folios, get_new_folio, + put_new_folio, private, MIGRATE_ASYNC, reason, + &ret_folios, NULL, &stats, 1); list_splice_tail_init(&split_folios, &ret_folios); } rc_gather += rc; @@ -1980,14 +1978,11 @@ out: return rc_gather; } -struct page *alloc_migration_target(struct page *page, unsigned long private) +struct folio *alloc_migration_target(struct folio *src, unsigned long private) { - struct folio *folio = page_folio(page); struct migration_target_control *mtc; gfp_t gfp_mask; unsigned int order = 0; - struct folio *hugetlb_folio = NULL; - struct folio *new_folio = NULL; int nid; int zidx; @@ -1995,33 +1990,30 @@ struct page *alloc_migration_target(struct page *page, unsigned long private) gfp_mask = mtc->gfp_mask; nid = mtc->nid; if (nid == NUMA_NO_NODE) - nid = folio_nid(folio); + nid = folio_nid(src); - if (folio_test_hugetlb(folio)) { - struct hstate *h = folio_hstate(folio); + if (folio_test_hugetlb(src)) { + struct hstate *h = folio_hstate(src); gfp_mask = htlb_modify_alloc_mask(h, gfp_mask); - hugetlb_folio = alloc_hugetlb_folio_nodemask(h, nid, + return alloc_hugetlb_folio_nodemask(h, nid, mtc->nmask, gfp_mask); - return &hugetlb_folio->page; } - if (folio_test_large(folio)) { + if (folio_test_large(src)) { /* * clear __GFP_RECLAIM to make the migration callback * consistent with regular THP allocations. */ gfp_mask &= ~__GFP_RECLAIM; gfp_mask |= GFP_TRANSHUGE; - order = folio_order(folio); + order = folio_order(src); } - zidx = zone_idx(folio_zone(folio)); + zidx = zone_idx(folio_zone(src)); if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE) gfp_mask |= __GFP_HIGHMEM; - new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask); - - return &new_folio->page; + return __folio_alloc(gfp_mask, order, nid, mtc->nmask); } #ifdef CONFIG_NUMA @@ -2472,13 +2464,12 @@ static bool migrate_balanced_pgdat(struct pglist_data *pgdat, return false; } -static struct page *alloc_misplaced_dst_page(struct page *page, +static struct folio *alloc_misplaced_dst_folio(struct folio *src, unsigned long data) { int nid = (int) data; - int order = compound_order(page); + int order = folio_order(src); gfp_t gfp = __GFP_THISNODE; - struct folio *new; if (order > 0) gfp |= GFP_TRANSHUGE_LIGHT; @@ -2487,9 +2478,7 @@ static struct page *alloc_misplaced_dst_page(struct page *page, __GFP_NOWARN; gfp &= ~__GFP_RECLAIM; } - new = __folio_alloc_node(gfp, order, nid); - - return &new->page; + return __folio_alloc_node(gfp, order, nid); } static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) @@ -2567,7 +2556,7 @@ int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma, goto out; list_add(&page->lru, &migratepages); - nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, + nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio, NULL, node, MIGRATE_ASYNC, MR_NUMA_MISPLACED, &nr_succeeded); if (nr_remaining) { diff --git a/mm/vmscan.c b/mm/vmscan.c index 15efbfbb1963..4637f6462e9c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -1621,9 +1621,10 @@ static void folio_check_dirty_writeback(struct folio *folio, mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); } -static struct page *alloc_demote_page(struct page *page, unsigned long private) +static struct folio *alloc_demote_folio(struct folio *src, + unsigned long private) { - struct page *target_page; + struct folio *dst; nodemask_t 
*allowed_mask; struct migration_target_control *mtc; @@ -1641,14 +1642,14 @@ static struct page *alloc_demote_page(struct page *page, unsigned long private) */ mtc->nmask = NULL; mtc->gfp_mask |= __GFP_THISNODE; - target_page = alloc_migration_target(page, (unsigned long)mtc); - if (target_page) - return target_page; + dst = alloc_migration_target(src, (unsigned long)mtc); + if (dst) + return dst; mtc->gfp_mask &= ~__GFP_THISNODE; mtc->nmask = allowed_mask; - return alloc_migration_target(page, (unsigned long)mtc); + return alloc_migration_target(src, (unsigned long)mtc); } /* @@ -1683,7 +1684,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios, node_get_allowed_targets(pgdat, &allowed_mask); /* Demotion ignores all cpuset and mempolicy settings */ - migrate_pages(demote_folios, alloc_demote_page, NULL, + migrate_pages(demote_folios, alloc_demote_folio, NULL, (unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION, &nr_succeeded); -- cgit v1.2.3 From 89f499f35c11af61ba7075ddc23209d10805a25a Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:14 -0400 Subject: maple_tree: add format option to mt_dump() Allow different formatting strings to be used when dumping the tree. Currently supports hex and decimal. Link: https://lkml.kernel.org/r/20230518145544.1722059-6-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/maple_tree.h | 9 ++++- lib/maple_tree.c | 87 ++++++++++++++++++++++++++-------------- lib/test_maple_tree.c | 10 ++--- mm/internal.h | 4 +- mm/mmap.c | 8 ++-- tools/testing/radix-tree/maple.c | 12 +++--- 6 files changed, 82 insertions(+), 48 deletions(-) (limited to 'mm') diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 1fadb5f5978b..140fb271be4a 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -670,10 +670,15 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max); #ifdef CONFIG_DEBUG_MAPLE_TREE +enum mt_dump_format { + mt_dump_dec, + mt_dump_hex, +}; + extern atomic_t maple_tree_tests_run; extern atomic_t maple_tree_tests_passed; -void mt_dump(const struct maple_tree *mt); +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format); void mt_validate(struct maple_tree *mt); void mt_cache_shrink(void); #define MT_BUG_ON(__tree, __x) do { \ @@ -681,7 +686,7 @@ void mt_cache_shrink(void); if (__x) { \ pr_info("BUG at %s:%d (%u)\n", \ __func__, __LINE__, __x); \ - mt_dump(__tree); \ + mt_dump(__tree, mt_dump_hex); \ pr_info("Pass: %u Run:%u\n", \ atomic_read(&maple_tree_tests_passed), \ atomic_read(&maple_tree_tests_run)); \ diff --git a/lib/maple_tree.c b/lib/maple_tree.c index e095e2c39a1b..dfa0271101d2 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -5694,7 +5694,7 @@ void *mas_store(struct ma_state *mas, void *entry) trace_ma_write(__func__, mas, 0, entry); #ifdef CONFIG_DEBUG_MAPLE_TREE if (mas->index > mas->last) - pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); + pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); MT_BUG_ON(mas->tree, mas->index > mas->last); if (mas->index > mas->last) { mas_set_err(mas, -EINVAL); @@ -6748,22 +6748,33 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) /* Tree validations */ static void mt_dump_node(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth); + unsigned long min, unsigned 
long max, unsigned int depth, + enum mt_dump_format format); static void mt_dump_range(unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { static const char spaces[] = " "; - if (min == max) - pr_info("%.*s%lu: ", depth * 2, spaces, min); - else - pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); + switch(format) { + case mt_dump_hex: + if (min == max) + pr_info("%.*s%lx: ", depth * 2, spaces, min); + else + pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); + break; + default: + case mt_dump_dec: + if (min == max) + pr_info("%.*s%lu: ", depth * 2, spaces, min); + else + pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); + } } static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); if (xa_is_value(entry)) pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), @@ -6777,7 +6788,8 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, } static void mt_dump_range64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_range_64 *node = &mte_to_node(entry)->mr64; bool leaf = mte_is_leaf(entry); @@ -6785,8 +6797,16 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, int i; pr_cont(" contents: "); - for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { + switch(format) { + case mt_dump_hex: + pr_cont("%p %lX ", node->slot[i], node->pivot[i]); + break; + default: + case mt_dump_dec: + pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + } + } pr_cont("%p\n", node->slot[i]); for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { unsigned long last = max; @@ -6799,24 +6819,32 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; if (last > max) { - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + switch(format) { + case mt_dump_hex: + pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); - break; + break; + default: + case mt_dump_dec: + pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + } } first = last + 1; } } static void mt_dump_arange64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_arange_64 *node = &mte_to_node(entry)->ma64; bool leaf = mte_is_leaf(entry); @@ -6841,10 +6869,10 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; @@ -6858,13 +6886,14 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, } static void mt_dump_node(const struct maple_tree *mt, 
void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_node *node = mte_to_node(entry); unsigned int type = mte_node_type(entry); unsigned int i; - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); pr_cont("node %p depth %d type %d parent %p", node, depth, type, node ? node->parent : NULL); @@ -6875,15 +6904,15 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, if (min + i > max) pr_cont("OUT OF RANGE: "); mt_dump_entry(mt_slot(mt, node->slot, i), - min + i, min + i, depth); + min + i, min + i, depth, format); } break; case maple_leaf_64: case maple_range_64: - mt_dump_range64(mt, entry, min, max, depth); + mt_dump_range64(mt, entry, min, max, depth, format); break; case maple_arange_64: - mt_dump_arange64(mt, entry, min, max, depth); + mt_dump_arange64(mt, entry, min, max, depth, format); break; default: @@ -6891,16 +6920,16 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, } } -void mt_dump(const struct maple_tree *mt) +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) { void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); pr_info("maple_tree(%p) flags %X, height %u root %p\n", mt, mt->ma_flags, mt_height(mt), entry); if (!xa_is_node(entry)) - mt_dump_entry(entry, 0, 0, 0); + mt_dump_entry(entry, 0, 0, 0, format); else if (entry) - mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); + mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); } EXPORT_SYMBOL_GPL(mt_dump); @@ -6953,7 +6982,7 @@ static void mas_validate_gaps(struct ma_state *mas) mas_mn(mas), i, mas_get_slot(mas, i), gap, p_end, p_start); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); MT_BUG_ON(mas->tree, gap != p_end - p_start + 1); @@ -6986,7 +7015,7 @@ counted: MT_BUG_ON(mas->tree, max_gap > mas->max); if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); } MT_BUG_ON(mas->tree, diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index f1db333270e9..d6929270dd36 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -219,7 +219,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n", __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -248,7 +248,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n", max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -893,7 +893,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) #if DEBUG_ALLOC_RANGE pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, (range[i + 1] >> 12) - 1); - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); @@ -934,7 +934,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) xa_mk_value(req_range[i] >> 12)); /* pointer */ mt_validate(mt); #if DEBUG_ALLOC_RANGE - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif } @@ -1572,7 +1572,7 @@ static noinline void check_node_overwrite(struct 
maple_tree *mt) mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100)); mtree_test_store_range(mt, 319951, 367950, NULL); - /*mt_dump(mt); */ + /*mt_dump(mt, mt_dump_dec); */ mt_validate(mt); } diff --git a/mm/internal.h b/mm/internal.h index ec55da813c13..692498a84fde 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1064,13 +1064,13 @@ static inline void vma_iter_store(struct vma_iterator *vmi, printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree); + mt_dump(vmi->mas.tree, mt_dump_hex); } if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree); + mt_dump(vmi->mas.tree, mt_dump_hex); } #endif diff --git a/mm/mmap.c b/mm/mmap.c index 13678edaa22c..04bcf3b3c720 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -301,7 +301,7 @@ out: #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) extern void mt_validate(struct maple_tree *mt); -extern void mt_dump(const struct maple_tree *mt); +extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt); /* Validate the maple tree */ static void validate_mm_mt(struct mm_struct *mm) @@ -323,18 +323,18 @@ static void validate_mm_mt(struct mm_struct *mm) pr_emerg("mt vma: %p %lu - %lu\n", vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); if (vma_mt->vm_end != mas.last + 1) { pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n", mm, vma_mt->vm_start, vma_mt->vm_end, mas.index, mas.last); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); } VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm); if (vma_mt->vm_start != mas.index) { pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n", mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); } VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); } diff --git a/tools/testing/radix-tree/maple.c b/tools/testing/radix-tree/maple.c index 75df543e019c..ebcb3faf85ea 100644 --- a/tools/testing/radix-tree/maple.c +++ b/tools/testing/radix-tree/maple.c @@ -1054,7 +1054,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, if (entry_count) MT_BUG_ON(mt, !mt_height(mt)); #if check_erase2_debug > 1 - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif #if check_erase2_debug pr_err("Done\n"); @@ -1085,7 +1085,7 @@ static noinline void check_erase2_testset(struct maple_tree *mt, mas_for_each(&mas, foo, ULONG_MAX) { if (xa_is_zero(foo)) { if (addr == mas.index) { - mt_dump(mas.tree); + mt_dump(mas.tree, mt_dump_hex); pr_err("retry failed %lu - %lu\n", mas.index, mas.last); MT_BUG_ON(mt, 1); @@ -34513,7 +34513,7 @@ static void *rcu_reader_rev(void *ptr) if (mas.index != r_start) { alt = xa_mk_value(index + i * 2 + 1 + RCU_RANGE_COUNT); - mt_dump(test->mt); + mt_dump(test->mt, mt_dump_dec); printk("Error: %lu-%lu %p != %lu-%lu %p %p line %d i %d\n", mas.index, mas.last, entry, r_start, r_end, expected, alt, @@ -35784,10 +35784,10 @@ void farmer_tests(void) struct maple_node *node; DEFINE_MTREE(tree); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); tree.ma_root = xa_mk_value(0); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); node = mt_alloc_one(GFP_KERNEL); node->parent = (void *)((unsigned long)(&tree) | 1); @@ -35797,7 +35797,7 @@ void farmer_tests(void) 
node->mr64.pivot[1] = 1; node->mr64.pivot[2] = 0; tree.ma_root = mt_mk_node(node, maple_leaf_64); - mt_dump(&tree); + mt_dump(&tree, mt_dump_dec); node->parent = ma_parent_ptr(node); ma_free_rcu(node); -- cgit v1.2.3 From b50e195ff436625b26dcc9839bc52cc7c5bf1a54 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:26 -0400 Subject: mm: update validate_mm() to use vma iterator Use the vma iterator in the validation code and combine the code to check the maple tree into the main validate_mm() function. Introduce a new function vma_iter_dump_tree() to dump the maple tree in hex layout. Replace all calls to validate_mm_mt() with validate_mm(). [Liam.Howlett@oracle.com: update validate_mm() to use vma iterator CONFIG flag] Link: https://lkml.kernel.org/r/20230606183538.588190-1-Liam.Howlett@oracle.com Link: https://lkml.kernel.org/r/20230518145544.1722059-18-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- include/linux/mmdebug.h | 14 ++++++++ mm/debug.c | 9 +++++ mm/internal.h | 3 +- mm/mmap.c | 94 +++++++++++++++++++------------------------------ 4 files changed, 61 insertions(+), 59 deletions(-) (limited to 'mm') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index b8728d11c949..7c3e7b0b0e8f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -8,10 +8,12 @@ struct page; struct vm_area_struct; struct mm_struct; +struct vma_iterator; void dump_page(struct page *page, const char *reason); void dump_vma(const struct vm_area_struct *vma); void dump_mm(const struct mm_struct *mm); +void vma_iter_dump_tree(const struct vma_iterator *vmi); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -74,6 +76,17 @@ void dump_mm(const struct mm_struct *mm); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_ONCE_MM(cond, mm) ({ \ + static bool __section(".data.once") __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_mm(mm); \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) #define VM_WARN_ON(cond) (void)WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) (void)WARN_ON_ONCE(cond) @@ -90,6 +103,7 @@ void dump_mm(const struct mm_struct *mm); #define VM_WARN_ON_ONCE_PAGE(cond, page) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) 
BUILD_BUG_ON_INVALID(cond) #endif diff --git a/mm/debug.c b/mm/debug.c index c7b228097bd9..ee533a5ceb79 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -268,4 +268,13 @@ void page_init_poison(struct page *page, size_t size) if (page_init_poisoning) memset(page, PAGE_POISON_PATTERN, size); } + +void vma_iter_dump_tree(const struct vma_iterator *vmi) +{ +#if defined(CONFIG_DEBUG_VM_MAPLE_TREE) + mas_dump(&vmi->mas); + mt_dump(vmi->mas.tree, mt_dump_hex); +#endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ +} + #endif /* CONFIG_DEBUG_VM */ diff --git a/mm/internal.h b/mm/internal.h index 692498a84fde..41cc5e6225fb 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1064,13 +1064,14 @@ static inline void vma_iter_store(struct vma_iterator *vmi, printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree, mt_dump_hex); + vma_iter_dump_tree(vmi); } if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); mt_dump(vmi->mas.tree, mt_dump_hex); + vma_iter_dump_tree(vmi); } #endif diff --git a/mm/mmap.c b/mm/mmap.c index 04bcf3b3c720..8f67d80c6dde 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -300,61 +300,40 @@ out: } #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) -extern void mt_validate(struct maple_tree *mt); -extern void mt_dump(const struct maple_tree *mt, enum mt_dump_format fmt); - -/* Validate the maple tree */ -static void validate_mm_mt(struct mm_struct *mm) -{ - struct maple_tree *mt = &mm->mm_mt; - struct vm_area_struct *vma_mt; - - MA_STATE(mas, mt, 0, 0); - - mt_validate(&mm->mm_mt); - mas_for_each(&mas, vma_mt, ULONG_MAX) { - if ((vma_mt->vm_start != mas.index) || - (vma_mt->vm_end - 1 != mas.last)) { - pr_emerg("issue in %s\n", current->comm); - dump_stack(); - dump_vma(vma_mt); - pr_emerg("mt piv: %p %lu - %lu\n", vma_mt, - mas.index, mas.last); - pr_emerg("mt vma: %p %lu - %lu\n", vma_mt, - vma_mt->vm_start, vma_mt->vm_end); - - mt_dump(mas.tree, mt_dump_hex); - if (vma_mt->vm_end != mas.last + 1) { - pr_err("vma: %p vma_mt %lu-%lu\tmt %lu-%lu\n", - mm, vma_mt->vm_start, vma_mt->vm_end, - mas.index, mas.last); - mt_dump(mas.tree, mt_dump_hex); - } - VM_BUG_ON_MM(vma_mt->vm_end != mas.last + 1, mm); - if (vma_mt->vm_start != mas.index) { - pr_err("vma: %p vma_mt %p %lu - %lu doesn't match\n", - mm, vma_mt, vma_mt->vm_start, vma_mt->vm_end); - mt_dump(mas.tree, mt_dump_hex); - } - VM_BUG_ON_MM(vma_mt->vm_start != mas.index, mm); - } - } -} - static void validate_mm(struct mm_struct *mm) { int bug = 0; int i = 0; struct vm_area_struct *vma; - MA_STATE(mas, &mm->mm_mt, 0, 0); - - validate_mm_mt(mm); + VMA_ITERATOR(vmi, mm, 0); - mas_for_each(&mas, vma, ULONG_MAX) { + mt_validate(&mm->mm_mt); + for_each_vma(vmi, vma) { #ifdef CONFIG_DEBUG_VM_RB struct anon_vma *anon_vma = vma->anon_vma; struct anon_vma_chain *avc; +#endif + unsigned long vmi_start, vmi_end; + bool warn = 0; + vmi_start = vma_iter_addr(&vmi); + vmi_end = vma_iter_end(&vmi); + if (VM_WARN_ON_ONCE_MM(vma->vm_end != vmi_end, mm)) + warn = 1; + + if (VM_WARN_ON_ONCE_MM(vma->vm_start != vmi_start, mm)) + warn = 1; + + if (warn) { + pr_emerg("issue in %s\n", current->comm); + dump_stack(); + dump_vma(vma); + pr_emerg("tree range: %px start %lx end %lx\n", vma, + vmi_start, vmi_end - 1); + vma_iter_dump_tree(&vmi); + } + +#ifdef 
CONFIG_DEBUG_VM_RB if (anon_vma) { anon_vma_lock_read(anon_vma); list_for_each_entry(avc, &vma->anon_vma_chain, same_vma) @@ -365,14 +344,13 @@ static void validate_mm(struct mm_struct *mm) i++; } if (i != mm->map_count) { - pr_emerg("map_count %d mas_for_each %d\n", mm->map_count, i); + pr_emerg("map_count %d vma iterator %d\n", mm->map_count, i); bug = 1; } VM_BUG_ON_MM(bug, mm); } #else /* !CONFIG_DEBUG_VM_MAPLE_TREE */ -#define validate_mm_mt(root) do { } while (0) #define validate_mm(mm) do { } while (0) #endif /* CONFIG_DEBUG_VM_MAPLE_TREE */ @@ -2234,7 +2212,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, struct vm_area_struct *new; int err; - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); WARN_ON(vma->vm_start >= addr); WARN_ON(vma->vm_end <= addr); @@ -2292,7 +2270,7 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma, /* Success. */ if (new_below) vma_next(vmi); - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); return 0; out_free_mpol: @@ -2301,7 +2279,7 @@ out_free_vmi: vma_iter_free(vmi); out_free_vma: vm_area_free(new); - validate_mm_mt(vma->vm_mm); + validate_mm(vma->vm_mm); return err; } @@ -2936,7 +2914,7 @@ int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, arch_unmap(mm, start, end); ret = do_vmi_align_munmap(vmi, vma, mm, start, end, uf, downgrade); - validate_mm_mt(mm); + validate_mm(mm); return ret; } @@ -2958,7 +2936,7 @@ static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma, struct mm_struct *mm = current->mm; struct vma_prepare vp; - validate_mm_mt(mm); + validate_mm(mm); /* * Check against address space limits by the changed size * Note: This happens *after* clearing old mappings in some code paths. @@ -3199,7 +3177,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, bool faulted_in_anon_vma = true; VMA_ITERATOR(vmi, mm, addr); - validate_mm_mt(mm); + validate_mm(mm); /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. @@ -3258,7 +3236,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, goto out_vma_link; *need_rmap_locks = false; } - validate_mm_mt(mm); + validate_mm(mm); return new_vma; out_vma_link: @@ -3274,7 +3252,7 @@ out_free_mempol: out_free_vma: vm_area_free(new_vma); out: - validate_mm_mt(mm); + validate_mm(mm); return NULL; } @@ -3411,7 +3389,7 @@ static struct vm_area_struct *__install_special_mapping( int ret; struct vm_area_struct *vma; - validate_mm_mt(mm); + validate_mm(mm); vma = vm_area_alloc(mm); if (unlikely(vma == NULL)) return ERR_PTR(-ENOMEM); @@ -3434,12 +3412,12 @@ static struct vm_area_struct *__install_special_mapping( perf_event_mmap(vma); - validate_mm_mt(mm); + validate_mm(mm); return vma; out: vm_area_free(vma); - validate_mm_mt(mm); + validate_mm(mm); return ERR_PTR(ret); } -- cgit v1.2.3 From 36bd931049bf93a4eaec4a558beca477db152881 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:27 -0400 Subject: mm: update vma_iter_store() to use MAS_WARN_ON() MAS_WARN_ON() will provide more information on the maple state and can be more useful for debugging. Use this version of WARN_ON() in the debugging code when storing to the tree. Update the printk to a pr_warn(), but this will only be printed when maple tree debug is enabled anyways. Making all print statements into one will keep them together on a busy terminal. 
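
For illustration only, the debug pattern this change moves to looks roughly like the following sketch; the names are taken from the hunk below and the fragment is simplified, not part of the patch:

	/* Sketch: MAS_WARN_ON() reports on the maple state itself, so the
	 * surrounding check only needs a single pr_warn() for the extra
	 * context instead of a series of printk() calls.
	 */
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START &&
			vmi->mas.index > vma->vm_start))
		pr_warn("%lx > %lx\n", vmi->mas.index, vma->vm_start);
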
Link: https://lkml.kernel.org/r/20230518145544.1722059-19-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Reviewed-by: Sergey Senozhatsky Cc: David Binderman Cc: Peng Zhang Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/internal.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 41cc5e6225fb..bb6542279599 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -1060,18 +1060,17 @@ static inline void vma_iter_store(struct vma_iterator *vmi, { #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) - if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.index > vma->vm_start)) { - printk("%lu > %lu\n", vmi->mas.index, vma->vm_start); - printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); - printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - vma_iter_dump_tree(vmi); + if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && + vmi->mas.index > vma->vm_start)) { + pr_warn("%lx > %lx\n store vma %lx-%lx\n into slot %lx-%lx\n", + vmi->mas.index, vma->vm_start, vma->vm_start, + vma->vm_end, vmi->mas.index, vmi->mas.last); } - if (WARN_ON(vmi->mas.node != MAS_START && vmi->mas.last < vma->vm_start)) { - printk("%lu < %lu\n", vmi->mas.last, vma->vm_start); - printk("store of vma %lu-%lu", vma->vm_start, vma->vm_end); - printk("into slot %lu-%lu", vmi->mas.index, vmi->mas.last); - mt_dump(vmi->mas.tree, mt_dump_hex); - vma_iter_dump_tree(vmi); + if (MAS_WARN_ON(&vmi->mas, vmi->mas.node != MAS_START && + vmi->mas.last < vma->vm_start)) { + pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n", + vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end, + vmi->mas.index, vmi->mas.last); } #endif -- cgit v1.2.3 From 15c0c60b8cee4a6db263585520f994654429fac3 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:31 -0400 Subject: mm/mmap: change do_vmi_align_munmap() for maple tree iterator changes The maple tree iterator cleanup is incompatible with the way do_vmi_align_munmap() expects it to behave. Update the expected behaviour now, since the new code also works with the current iterator. Link: https://lkml.kernel.org/r/20230518145544.1722059-23-Liam.Howlett@oracle.com Signed-off-by: Liam R. Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/mmap.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 8f67d80c6dde..19d3c843be0c 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2388,7 +2388,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, #endif } - next = vma_next(vmi); + if (vma_iter_end(vmi) > end) + next = vma_iter_load(vmi); + + if (!next) + next = vma_next(vmi); + if (unlikely(uf)) { /* * If userfaultfd_unmap_prep returns an error the vmas -- cgit v1.2.3 From 5c1c03de1b1636f041a275e777171307cac8d958 Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Thu, 18 May 2023 10:55:44 -0400 Subject: mm: avoid rewalk in mmap_region If the iterator has moved to the previous entry, then step forward one range, back to the gap. Link: https://lkml.kernel.org/r/20230518145544.1722059-36-Liam.Howlett@oracle.com Signed-off-by: Liam R.
Howlett Cc: David Binderman Cc: Peng Zhang Cc: Sergey Senozhatsky Cc: Vernon Yang Cc: Wei Yang Signed-off-by: Andrew Morton --- mm/mmap.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/mmap.c b/mm/mmap.c index 19d3c843be0c..44be7fdfaac9 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2606,6 +2606,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr, } cannot_expand: + if (prev) + vma_iter_next_range(&vmi); + /* * Determine the object being mapped and call the appropriate * specific mapper. the address has already been validated, but -- cgit v1.2.3 From 62069aace145658bc8ce79cbf7b6cf611db4a22f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Thu, 18 May 2023 10:11:19 -0400 Subject: mm: page_alloc: set sysctl_lowmem_reserve_ratio storage-class-specifier to static smatch reports mm/page_alloc.c:247:5: warning: symbol 'sysctl_lowmem_reserve_ratio' was not declared. Should it be static? This variable is only used in its defining file, so it should be static Link: https://lkml.kernel.org/r/20230518141119.927074-1-trix@redhat.com Signed-off-by: Tom Rix Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ee23ba9c0ca7..b9a9ba2db9e9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -244,7 +244,7 @@ static void __free_pages_ok(struct page *page, unsigned int order, * TBD: should special case ZONE_DMA32 machines here - in those we normally * don't need any ZONE_NORMAL reservation */ -int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { +static int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = { #ifdef CONFIG_ZONE_DMA [ZONE_DMA] = 256, #endif -- cgit v1.2.3 From ecd8b2928f2efc7b678b361d51920c15b5ef3885 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:55 +0200 Subject: mm: compaction: remove compaction result helpers Patch series "mm: compaction: cleanups & simplifications". These compaction cleanups are split out from the huge page allocator series[1], as requested by reviewer feedback. [1] https://lore.kernel.org/linux-mm/20230418191313.268131-1-hannes@cmpxchg.org/ This patch (of 5): The compaction result helpers encode quirks that are specific to the allocator's retry logic. E.g. COMPACT_SUCCESS and COMPACT_COMPLETE actually represent failures that should be retried upon, and so on. I frequently found myself pulling up the helper implementation in order to understand and work on the retry logic. They're not quite clean abstractions; rather they split the retry logic into two locations. Remove the helpers and inline the checks. Then comment on the result interpretations directly where the decision making happens. 
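
For illustration only, the retry path ends up reading the raw result right where the decision is made, roughly like this sketch (condensed from the page_alloc.c hunk below, not part of the patch):

	/* Sketch: interpret compact_result inline at the retry decision */
	if (compact_result == COMPACT_SUCCESS)		/* made progress, retry a few times */
		(*compaction_retries)++;
	if (compact_result == COMPACT_COMPLETE)		/* all zones scanned, still failed */
		goto check_priority;
	if (compact_result == COMPACT_SKIPPED)		/* too few order-0 pages, ask reclaim */
		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
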
Link: https://lkml.kernel.org/r/20230519123959.77335-1-hannes@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-2-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 92 ------------------------------------------ include/trace/events/mmflags.h | 4 +- mm/page_alloc.c | 30 ++++++++------ 3 files changed, 19 insertions(+), 107 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index a6e512cfb670..1f0328a2ba48 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -95,78 +95,6 @@ extern enum compact_result compaction_suitable(struct zone *zone, int order, extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); -/* Compaction has made some progress and retrying makes sense */ -static inline bool compaction_made_progress(enum compact_result result) -{ - /* - * Even though this might sound confusing this in fact tells us - * that the compaction successfully isolated and migrated some - * pageblocks. - */ - if (result == COMPACT_SUCCESS) - return true; - - return false; -} - -/* Compaction has failed and it doesn't make much sense to keep retrying. */ -static inline bool compaction_failed(enum compact_result result) -{ - /* All zones were scanned completely and still not result. */ - if (result == COMPACT_COMPLETE) - return true; - - return false; -} - -/* Compaction needs reclaim to be performed first, so it can continue. */ -static inline bool compaction_needs_reclaim(enum compact_result result) -{ - /* - * Compaction backed off due to watermark checks for order-0 - * so the regular reclaim has to try harder and reclaim something. - */ - if (result == COMPACT_SKIPPED) - return true; - - return false; -} - -/* - * Compaction has backed off for some reason after doing some work or none - * at all. It might be throttling or lock contention. Retrying might be still - * worthwhile, but with a higher priority if allowed. - */ -static inline bool compaction_withdrawn(enum compact_result result) -{ - /* - * If compaction is deferred for high-order allocations, it is - * because sync compaction recently failed. If this is the case - * and the caller requested a THP allocation, we do not want - * to heavily disrupt the system, so we fail the allocation - * instead of entering direct reclaim. - */ - if (result == COMPACT_DEFERRED) - return true; - - /* - * If compaction in async mode encounters contention or blocks higher - * priority task we back off early rather than cause stalls. - */ - if (result == COMPACT_CONTENDED) - return true; - - /* - * Page scanners have met but we haven't scanned full zones so this - * is a back off in fact. 
- */ - if (result == COMPACT_PARTIAL_SKIPPED) - return true; - - return false; -} - - bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); @@ -185,26 +113,6 @@ static inline enum compact_result compaction_suitable(struct zone *zone, int ord return COMPACT_SKIPPED; } -static inline bool compaction_made_progress(enum compact_result result) -{ - return false; -} - -static inline bool compaction_failed(enum compact_result result) -{ - return false; -} - -static inline bool compaction_needs_reclaim(enum compact_result result) -{ - return false; -} - -static inline bool compaction_withdrawn(enum compact_result result) -{ - return true; -} - static inline void kcompactd_run(int nid) { } diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h index b63e7c0fbbe5..1478b9dd05fa 100644 --- a/include/trace/events/mmflags.h +++ b/include/trace/events/mmflags.h @@ -223,8 +223,8 @@ IF_HAVE_VM_SOFTDIRTY(VM_SOFTDIRTY, "softdirty" ) \ #define compact_result_to_feedback(result) \ ({ \ enum compact_result __result = result; \ - (compaction_failed(__result)) ? COMPACTION_FAILED : \ - (compaction_withdrawn(__result)) ? COMPACTION_WITHDRAWN : COMPACTION_PROGRESS; \ + (__result == COMPACT_COMPLETE) ? COMPACTION_FAILED : \ + (__result == COMPACT_SUCCESS) ? COMPACTION_PROGRESS : COMPACTION_WITHDRAWN; \ }) #define COMPACTION_FEEDBACK \ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b9a9ba2db9e9..e3a3ebc2dfce 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3469,35 +3469,39 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, if (fatal_signal_pending(current)) return false; - if (compaction_made_progress(compact_result)) + /* + * Compaction managed to coalesce some page blocks, but the + * allocation failed presumably due to a race. Retry some. + */ + if (compact_result == COMPACT_SUCCESS) (*compaction_retries)++; /* - * compaction considers all the zone as desperately out of memory - * so it doesn't really make much sense to retry except when the + * All zones were scanned completely and still no result. It + * doesn't really make much sense to retry except when the * failure could be caused by insufficient priority */ - if (compaction_failed(compact_result)) + if (compact_result == COMPACT_COMPLETE) goto check_priority; /* - * compaction was skipped because there are not enough order-0 pages - * to work with, so we retry only if it looks like reclaim can help. + * Compaction was skipped due to a lack of free order-0 + * migration targets. Continue if reclaim can help. */ - if (compaction_needs_reclaim(compact_result)) { + if (compact_result == COMPACT_SKIPPED) { ret = compaction_zonelist_suitable(ac, order, alloc_flags); goto out; } /* - * make sure the compaction wasn't deferred or didn't bail out early - * due to locks contention before we declare that we should give up. - * But the next retry should use a higher priority if allowed, so - * we don't just keep bailing out endlessly. + * If compaction backed due to being deferred, due to + * contended locks in async mode, or due to scanners meeting + * after a partial scan, retry with increased priority. 
*/ - if (compaction_withdrawn(compact_result)) { + if (compact_result == COMPACT_DEFERRED || + compact_result == COMPACT_CONTENDED || + compact_result == COMPACT_PARTIAL_SKIPPED) goto check_priority; - } /* * !costly requests are much more important than __GFP_RETRY_MAYFAIL -- cgit v1.2.3 From 511a69b27fe6c2d7312789bd9e2e40b00e3903ef Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:56 +0200 Subject: mm: compaction: simplify should_compact_retry() The different branches for retry are unnecessarily complicated. There are really only three outcomes: progress (retry n times), skipped (retry if reclaim can help), failed (retry with higher priority). Rearrange the branches and the retry counter to make it simpler. [hannes@cmpxchg.org: restore behavior when hitting max_retries] Link: https://lkml.kernel.org/r/20230602144705.GB161817@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-3-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/page_alloc.c | 57 +++++++++++++++++++-------------------------------------- 1 file changed, 19 insertions(+), 38 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e3a3ebc2dfce..7e8673504a3d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3469,21 +3469,6 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, if (fatal_signal_pending(current)) return false; - /* - * Compaction managed to coalesce some page blocks, but the - * allocation failed presumably due to a race. Retry some. - */ - if (compact_result == COMPACT_SUCCESS) - (*compaction_retries)++; - - /* - * All zones were scanned completely and still no result. It - * doesn't really make much sense to retry except when the - * failure could be caused by insufficient priority - */ - if (compact_result == COMPACT_COMPLETE) - goto check_priority; - /* * Compaction was skipped due to a lack of free order-0 * migration targets. Continue if reclaim can help. @@ -3494,35 +3479,31 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, } /* - * If compaction backed due to being deferred, due to - * contended locks in async mode, or due to scanners meeting - * after a partial scan, retry with increased priority. + * Compaction managed to coalesce some page blocks, but the + * allocation failed presumably due to a race. Retry some. */ - if (compact_result == COMPACT_DEFERRED || - compact_result == COMPACT_CONTENDED || - compact_result == COMPACT_PARTIAL_SKIPPED) - goto check_priority; + if (compact_result == COMPACT_SUCCESS) { + /* + * !costly requests are much more important than + * __GFP_RETRY_MAYFAIL costly ones because they are de + * facto nofail and invoke OOM killer to move on while + * costly can fail and users are ready to cope with + * that. 1/4 retries is rather arbitrary but we would + * need much more detailed feedback from compaction to + * make a better decision. + */ + if (order > PAGE_ALLOC_COSTLY_ORDER) + max_retries /= 4; - /* - * !costly requests are much more important than __GFP_RETRY_MAYFAIL - * costly ones because they are de facto nofail and invoke OOM - * killer to move on while costly can fail and users are ready - * to cope with that. 1/4 retries is rather arbitrary but we - * would need much more detailed feedback from compaction to - * make a better decision. 
- */ - if (order > PAGE_ALLOC_COSTLY_ORDER) - max_retries /= 4; - if (*compaction_retries <= max_retries) { - ret = true; - goto out; + if (++(*compaction_retries) <= max_retries) { + ret = true; + goto out; + } } /* - * Make sure there are attempts at the highest priority if we exhausted - * all retries or failed at the lower priorities. + * Compaction failed. Retry with increasing priority. */ -check_priority: min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ? MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY; -- cgit v1.2.3 From e8606320e9af9774fd879e71c940fc9e5fd9b901 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:57 +0200 Subject: mm: compaction: refactor __compaction_suitable() __compaction_suitable() is supposed to check for available migration targets. However, it also checks whether the operation was requested via /proc/sys/vm/compact_memory, and whether the original allocation request can already succeed. These don't apply to all callsites. Move the checks out to the callers, so that later patches can deal with them one by one. No functional change intended. [hannes@cmpxchg.org: fix comment, per Vlastimil] Link: https://lkml.kernel.org/r/20230602144942.GC161817@cmpxchg.org Link: https://lkml.kernel.org/r/20230519123959.77335-4-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 4 +-- mm/compaction.c | 79 +++++++++++++++++++++++++++++----------------- mm/vmscan.c | 35 ++++++++++++-------- 3 files changed, 73 insertions(+), 45 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 1f0328a2ba48..9f7cf3e1bf89 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -90,7 +90,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); extern enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx); + int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, bool alloc_success); @@ -108,7 +108,7 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) } static inline enum compact_result compaction_suitable(struct zone *zone, int order, - int alloc_flags, int highest_zoneidx) + int highest_zoneidx) { return COMPACT_SKIPPED; } diff --git a/mm/compaction.c b/mm/compaction.c index e23e00bec030..bb9b76244a5d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2194,24 +2194,10 @@ static enum compact_result compact_finished(struct compact_control *cc) } static enum compact_result __compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx, unsigned long wmark_target) { unsigned long watermark; - - if (is_via_compact_memory(order)) - return COMPACT_CONTINUE; - - watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - /* - * If watermarks for high-order allocation are already met, there - * should be no need for compaction at all. - */ - if (zone_watermark_ok(zone, order, watermark, highest_zoneidx, - alloc_flags)) - return COMPACT_SUCCESS; - /* * Watermarks for order-0 must be met for compaction to be able to * isolate free pages for migration targets. This means that the @@ -2240,17 +2226,15 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, * compaction_suitable: Is this suitable to run compaction on this zone now? 
* Returns * COMPACT_SKIPPED - If there are too few free pages for compaction - * COMPACT_SUCCESS - If the allocation would succeed without compaction * COMPACT_CONTINUE - If compaction should run now */ enum compact_result compaction_suitable(struct zone *zone, int order, - unsigned int alloc_flags, int highest_zoneidx) { enum compact_result ret; int fragindex; - ret = __compaction_suitable(zone, order, alloc_flags, highest_zoneidx, + ret = __compaction_suitable(zone, order, highest_zoneidx, zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to @@ -2294,7 +2278,16 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; - enum compact_result compact_result; + unsigned long watermark; + + if (is_via_compact_memory(order)) + return true; + + /* Allocation can already succeed, nothing to do */ + watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); + if (zone_watermark_ok(zone, order, watermark, + ac->highest_zoneidx, alloc_flags)) + continue; /* * Do not consider all the reclaimable memory because we do not @@ -2304,9 +2297,8 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, */ available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); - compact_result = __compaction_suitable(zone, order, alloc_flags, - ac->highest_zoneidx, available); - if (compact_result == COMPACT_CONTINUE) + if (__compaction_suitable(zone, order, ac->highest_zoneidx, + available) == COMPACT_CONTINUE) return true; } @@ -2336,11 +2328,23 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) INIT_LIST_HEAD(&cc->migratepages); cc->migratetype = gfp_migratetype(cc->gfp_mask); - ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags, - cc->highest_zoneidx); - /* Compaction is likely to fail */ - if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED) - return ret; + + if (!is_via_compact_memory(cc->order)) { + unsigned long watermark; + + /* Allocation can already succeed, nothing to do */ + watermark = wmark_pages(cc->zone, + cc->alloc_flags & ALLOC_WMARK_MASK); + if (zone_watermark_ok(cc->zone, cc->order, watermark, + cc->highest_zoneidx, cc->alloc_flags)) + return COMPACT_SUCCESS; + + ret = compaction_suitable(cc->zone, cc->order, + cc->highest_zoneidx); + /* Compaction is likely to fail */ + if (ret == COMPACT_SKIPPED) + return ret; + } /* * Clear pageblock skip if there were failures recently and compaction @@ -2844,7 +2848,16 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0, + if (is_via_compact_memory(pgdat->kcompactd_max_order)) + return true; + + /* Allocation can already succeed, check other zones */ + if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, + min_wmark_pages(zone), + highest_zoneidx, 0)) + continue; + + if (compaction_suitable(zone, pgdat->kcompactd_max_order, highest_zoneidx) == COMPACT_CONTINUE) return true; } @@ -2882,10 +2895,18 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_deferred(zone, cc.order)) continue; - if (compaction_suitable(zone, cc.order, 0, zoneid) != - COMPACT_CONTINUE) + if (is_via_compact_memory(cc.order)) + goto compact; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, cc.order, + min_wmark_pages(zone), zoneid, 0)) continue; 
+ if (compaction_suitable(zone, cc.order, + zoneid) != COMPACT_CONTINUE) + continue; +compact: if (kthread_should_stop()) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index 4637f6462e9c..9f8bfd1fcf58 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6399,14 +6399,17 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, if (!managed_zone(zone)) continue; - switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) { - case COMPACT_SUCCESS: - case COMPACT_CONTINUE: + if (sc->order == -1) /* is_via_compact_memory() */ + return false; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + sc->reclaim_idx, 0)) + return false; + + if (compaction_suitable(zone, sc->order, + sc->reclaim_idx) == COMPACT_CONTINUE) return false; - default: - /* check next zone */ - ; - } } /* @@ -6594,16 +6597,20 @@ again: static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) { unsigned long watermark; - enum compact_result suitable; - suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx); - if (suitable == COMPACT_SUCCESS) - /* Allocation should succeed already. Don't reclaim. */ + if (sc->order == -1) /* is_via_compact_memory() */ + goto suitable; + + /* Allocation can already succeed, nothing to do */ + if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), + sc->reclaim_idx, 0)) return true; - if (suitable == COMPACT_SKIPPED) - /* Compaction cannot yet proceed. Do reclaim. */ - return false; + /* Compaction cannot yet proceed. Do reclaim. */ + if (compaction_suitable(zone, sc->order, + sc->reclaim_idx) == COMPACT_SKIPPED) + return false; +suitable: /* * Compaction is already possible, but it takes time to run and there * are potentially other callers using the pages just freed. So proceed -- cgit v1.2.3 From f98a497e1f16ee411df72629e32e31cba4cfa9cf Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:58 +0200 Subject: mm: compaction: remove unnecessary is_via_compact_memory() checks Remove from all paths not reachable via /proc/sys/vm/compact_memory. 
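
For context, is_via_compact_memory() only matches the magic order used by that sysctl; a minimal sketch of the idea (the in-tree helper also depends on CONFIG_COMPACTION):

	/* Sketch: proactive compaction requested through
	 * /proc/sys/vm/compact_memory is passed order == -1.
	 */
	static inline bool is_via_compact_memory(int order)
	{
		return order == -1;
	}
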
Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Cc: Baolin Wang Signed-off-by: Andrew Morton --- mm/compaction.c | 11 +---------- mm/vmscan.c | 8 +------- 2 files changed, 2 insertions(+), 17 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index bb9b76244a5d..bc1f389ed378 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2280,9 +2280,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, unsigned long available; unsigned long watermark; - if (is_via_compact_memory(order)) - return true; - /* Allocation can already succeed, nothing to do */ watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); if (zone_watermark_ok(zone, order, watermark, @@ -2848,9 +2845,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) if (!populated_zone(zone)) continue; - if (is_via_compact_memory(pgdat->kcompactd_max_order)) - return true; - /* Allocation can already succeed, check other zones */ if (zone_watermark_ok(zone, pgdat->kcompactd_max_order, min_wmark_pages(zone), @@ -2895,9 +2889,6 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_deferred(zone, cc.order)) continue; - if (is_via_compact_memory(cc.order)) - goto compact; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, cc.order, min_wmark_pages(zone), zoneid, 0)) @@ -2906,7 +2897,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) if (compaction_suitable(zone, cc.order, zoneid) != COMPACT_CONTINUE) continue; -compact: + if (kthread_should_stop()) return; diff --git a/mm/vmscan.c b/mm/vmscan.c index 9f8bfd1fcf58..99e4ae44850d 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat, if (!managed_zone(zone)) continue; - if (sc->order == -1) /* is_via_compact_memory() */ - return false; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) @@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) { unsigned long watermark; - if (sc->order == -1) /* is_via_compact_memory() */ - goto suitable; - /* Allocation can already succeed, nothing to do */ if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), sc->reclaim_idx, 0)) @@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) if (compaction_suitable(zone, sc->order, sc->reclaim_idx) == COMPACT_SKIPPED) return false; -suitable: + /* * Compaction is already possible, but it takes time to run and there * are potentially other callers using the pages just freed. So proceed -- cgit v1.2.3 From 1c9568e806a589da84b7afbdf0619b2c1f6c102a Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 14:39:59 +0200 Subject: mm: compaction: drop redundant watermark check in compaction_zonelist_suitable() The watermark check in compaction_zonelist_suitable(), called from should_compact_retry(), is sandwiched between two watermark checks already: before, there are freelist attempts as part of direct reclaim and direct compaction; after, there is a last-minute freelist attempt in __alloc_pages_may_oom(). The check in compaction_zonelist_suitable() isn't necessary. Kill it. 
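
For illustration only, what survives in the per-zone loop after this removal is just the reclaimable-memory estimate, roughly as in this sketch of the remaining logic shown in the hunk below:

	/* Sketch: would reclaiming this zone free enough order-0 pages
	 * for compaction to become viable?
	 */
	available = zone_reclaimable_pages(zone) / order;
	available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
	if (__compaction_suitable(zone, order, ac->highest_zoneidx,
				  available) == COMPACT_CONTINUE)
		return true;
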
Link: https://lkml.kernel.org/r/20230519123959.77335-6-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Michal Hocko Cc: Baolin Wang Signed-off-by: Andrew Morton --- mm/compaction.c | 7 ------- 1 file changed, 7 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index bc1f389ed378..470cfd24ef18 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2278,13 +2278,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->highest_zoneidx, ac->nodemask) { unsigned long available; - unsigned long watermark; - - /* Allocation can already succeed, nothing to do */ - watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK); - if (zone_watermark_ok(zone, order, watermark, - ac->highest_zoneidx, alloc_flags)) - continue; /* * Do not consider all the reclaimable memory because we do not -- cgit v1.2.3 From 3cf04937529020e149666f56a41ebdeb226b69ed Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 2 Jun 2023 11:12:04 -0400 Subject: mm: compaction: have compaction_suitable() return bool Since it only returns COMPACT_CONTINUE or COMPACT_SKIPPED now, a bool return value simplifies the callsites. Link: https://lkml.kernel.org/r/20230602151204.GD161817@cmpxchg.org Signed-off-by: Johannes Weiner Suggested-by: Vlastimil Babka Acked-by: Vlastimil Babka Cc: Baolin Wang Cc: Mel Gorman Cc: Michal Hocko Signed-off-by: Andrew Morton --- include/linux/compaction.h | 6 ++--- mm/compaction.c | 64 ++++++++++++++++++++++------------------------ mm/vmscan.c | 6 ++--- 3 files changed, 36 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 9f7cf3e1bf89..57b16e69c19a 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -89,7 +89,7 @@ extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, const struct alloc_context *ac, enum compact_priority prio, struct page **page); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern enum compact_result compaction_suitable(struct zone *zone, int order, +extern bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx); extern void compaction_defer_reset(struct zone *zone, int order, @@ -107,10 +107,10 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline enum compact_result compaction_suitable(struct zone *zone, int order, +static inline bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { - return COMPACT_SKIPPED; + return false; } static inline void kcompactd_run(int nid) diff --git a/mm/compaction.c b/mm/compaction.c index 470cfd24ef18..9b550bfe900b 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -2193,9 +2193,9 @@ static enum compact_result compact_finished(struct compact_control *cc) return ret; } -static enum compact_result __compaction_suitable(struct zone *zone, int order, - int highest_zoneidx, - unsigned long wmark_target) +static bool __compaction_suitable(struct zone *zone, int order, + int highest_zoneidx, + unsigned long wmark_target) { unsigned long watermark; /* @@ -2215,27 +2215,20 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order, watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ? 
low_wmark_pages(zone) : min_wmark_pages(zone); watermark += compact_gap(order); - if (!__zone_watermark_ok(zone, 0, watermark, highest_zoneidx, - ALLOC_CMA, wmark_target)) - return COMPACT_SKIPPED; - - return COMPACT_CONTINUE; + return __zone_watermark_ok(zone, 0, watermark, highest_zoneidx, + ALLOC_CMA, wmark_target); } /* * compaction_suitable: Is this suitable to run compaction on this zone now? - * Returns - * COMPACT_SKIPPED - If there are too few free pages for compaction - * COMPACT_CONTINUE - If compaction should run now */ -enum compact_result compaction_suitable(struct zone *zone, int order, - int highest_zoneidx) +bool compaction_suitable(struct zone *zone, int order, int highest_zoneidx) { - enum compact_result ret; - int fragindex; + enum compact_result compact_result; + bool suitable; - ret = __compaction_suitable(zone, order, highest_zoneidx, - zone_page_state(zone, NR_FREE_PAGES)); + suitable = __compaction_suitable(zone, order, highest_zoneidx, + zone_page_state(zone, NR_FREE_PAGES)); /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation @@ -2252,17 +2245,24 @@ enum compact_result compaction_suitable(struct zone *zone, int order, * excessive compaction for costly orders, but it should not be at the * expense of system stability. */ - if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) { - fragindex = fragmentation_index(zone, order); - if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) - ret = COMPACT_NOT_SUITABLE_ZONE; + if (suitable) { + compact_result = COMPACT_CONTINUE; + if (order > PAGE_ALLOC_COSTLY_ORDER) { + int fragindex = fragmentation_index(zone, order); + + if (fragindex >= 0 && + fragindex <= sysctl_extfrag_threshold) { + suitable = false; + compact_result = COMPACT_NOT_SUITABLE_ZONE; + } + } + } else { + compact_result = COMPACT_SKIPPED; } - trace_mm_compaction_suitable(zone, order, ret); - if (ret == COMPACT_NOT_SUITABLE_ZONE) - ret = COMPACT_SKIPPED; + trace_mm_compaction_suitable(zone, order, compact_result); - return ret; + return suitable; } bool compaction_zonelist_suitable(struct alloc_context *ac, int order, @@ -2288,7 +2288,7 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order, available = zone_reclaimable_pages(zone) / order; available += zone_page_state_snapshot(zone, NR_FREE_PAGES); if (__compaction_suitable(zone, order, ac->highest_zoneidx, - available) == COMPACT_CONTINUE) + available)) return true; } @@ -2329,11 +2329,10 @@ compact_zone(struct compact_control *cc, struct capture_control *capc) cc->highest_zoneidx, cc->alloc_flags)) return COMPACT_SUCCESS; - ret = compaction_suitable(cc->zone, cc->order, - cc->highest_zoneidx); /* Compaction is likely to fail */ - if (ret == COMPACT_SKIPPED) - return ret; + if (!compaction_suitable(cc->zone, cc->order, + cc->highest_zoneidx)) + return COMPACT_SKIPPED; } /* @@ -2845,7 +2844,7 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat) continue; if (compaction_suitable(zone, pgdat->kcompactd_max_order, - highest_zoneidx) == COMPACT_CONTINUE) + highest_zoneidx)) return true; } @@ -2887,8 +2886,7 @@ static void kcompactd_do_work(pg_data_t *pgdat) min_wmark_pages(zone), zoneid, 0)) continue; - if (compaction_suitable(zone, cc.order, - zoneid) != COMPACT_CONTINUE) + if (!compaction_suitable(zone, cc.order, zoneid)) continue; if (kthread_should_stop()) diff --git a/mm/vmscan.c b/mm/vmscan.c index 99e4ae44850d..df7e52b522ec 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6404,8 +6404,7 @@ static inline bool 
should_continue_reclaim(struct pglist_data *pgdat, sc->reclaim_idx, 0)) return false; - if (compaction_suitable(zone, sc->order, - sc->reclaim_idx) == COMPACT_CONTINUE) + if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) return false; } @@ -6601,8 +6600,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc) return true; /* Compaction cannot yet proceed. Do reclaim. */ - if (compaction_suitable(zone, sc->order, - sc->reclaim_idx) == COMPACT_SKIPPED) + if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) return false; /* -- cgit v1.2.3 From 4fbbb3fde3c69879ceebb33a8edd9d867008728b Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 13:13:59 +0200 Subject: mm: compaction: avoid GFP_NOFS ABBA deadlock During stress testing with higher-order allocations, a deadlock scenario was observed in compaction: One GFP_NOFS allocation was sleeping on mm/compaction.c::too_many_isolated(), while all CPUs in the system were busy with compactors spinning on buffer locks held by the sleeping GFP_NOFS allocation. Reclaim is susceptible to this same deadlock; we fixed it by granting GFP_NOFS allocations additional LRU isolation headroom, to ensure it makes forward progress while holding fs locks that other reclaimers might acquire. Do the same here. This code has been like this since compaction was initially merged, and I only managed to trigger this with out-of-tree patches that dramatically increase the contexts that do GFP_NOFS compaction. While the issue is real, it seems theoretical in nature given existing allocation sites. Worth fixing now, but no Fixes tag or stable CC. Link: https://lkml.kernel.org/r/20230519111359.40475-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Acked-by: Mel Gorman Acked-by: Vlastimil Babka Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/compaction.c | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 9b550bfe900b..261071a07681 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -740,8 +740,9 @@ isolate_freepages_range(struct compact_control *cc, } /* Similar to reclaim, but different enough that they don't share logic */ -static bool too_many_isolated(pg_data_t *pgdat) +static bool too_many_isolated(struct compact_control *cc) { + pg_data_t *pgdat = cc->zone->zone_pgdat; bool too_many; unsigned long active, inactive, isolated; @@ -753,6 +754,17 @@ static bool too_many_isolated(pg_data_t *pgdat) isolated = node_page_state(pgdat, NR_ISOLATED_FILE) + node_page_state(pgdat, NR_ISOLATED_ANON); + /* + * Allow GFP_NOFS to isolate past the limit set for regular + * compaction runs. This prevents an ABBA deadlock when other + * compactors have already isolated to the limit, but are + * blocked on filesystem locks held by the GFP_NOFS thread. + */ + if (cc->gfp_mask & __GFP_FS) { + inactive >>= 3; + active >>= 3; + } + too_many = isolated > (inactive + active) / 2; if (!too_many) wake_throttle_isolated(pgdat); @@ -801,7 +813,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * list by either parallel reclaimers or compaction. 
If there are, * delay for some time until fewer pages are isolated */ - while (unlikely(too_many_isolated(pgdat))) { + while (unlikely(too_many_isolated(cc))) { /* stop isolation if there are still pages not migrated */ if (cc->nr_migratepages) return -EAGAIN; -- cgit v1.2.3 From 3c54a298db4c6b38bbd5f86216ce0f5ad4596ccf Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Mon, 22 May 2023 09:24:12 +0100 Subject: mm/mmap: refactor mlock_future_check() In all but one instance, mlock_future_check() is treated as a boolean function despite returning an error code. In one instance, this error code is ignored and replaced with -ENOMEM. This is confusing, and the inversion of true -> failure, false -> success is not warranted. Convert the function to a bool, lightly refactor and return true if the check passes, false if not. Link: https://lkml.kernel.org/r/20230522082412.56685-1-lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Acked-by: Vlastimil Babka Cc: Liam Howlett Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- mm/internal.h | 4 ++-- mm/mmap.c | 33 +++++++++++++++++---------------- mm/mremap.c | 2 +- mm/secretmem.c | 2 +- 4 files changed, 21 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index bb6542279599..66dd214b302a 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -576,8 +576,8 @@ extern long populate_vma_page_range(struct vm_area_struct *vma, extern long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool write, int *locked); -extern int mlock_future_check(struct mm_struct *mm, unsigned long flags, - unsigned long len); +extern bool mlock_future_check(struct mm_struct *mm, unsigned long flags, + unsigned long bytes); /* * mlock_vma_folio() and munlock_vma_folio(): * should be called with vma's mmap_lock held for read or write, diff --git a/mm/mmap.c b/mm/mmap.c index 44be7fdfaac9..28d2c489a7e5 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,7 +182,8 @@ static int check_brk_limits(unsigned long addr, unsigned long len) if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; - return mlock_future_check(current->mm, current->mm->def_flags, len); + return mlock_future_check(current->mm, current->mm->def_flags, len) + ? 0 : -EAGAIN; } static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, unsigned long addr, unsigned long request, unsigned long flags); @@ -1145,21 +1146,21 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } -int mlock_future_check(struct mm_struct *mm, unsigned long flags, - unsigned long len) +bool mlock_future_check(struct mm_struct *mm, unsigned long flags, + unsigned long bytes) { - unsigned long locked, lock_limit; + unsigned long locked_pages, limit_pages; - /* mlock MCL_FUTURE? 
*/ - if (flags & VM_LOCKED) { - locked = len >> PAGE_SHIFT; - locked += mm->locked_vm; - lock_limit = rlimit(RLIMIT_MEMLOCK); - lock_limit >>= PAGE_SHIFT; - if (locked > lock_limit && !capable(CAP_IPC_LOCK)) - return -EAGAIN; - } - return 0; + if (!(flags & VM_LOCKED) || capable(CAP_IPC_LOCK)) + return true; + + locked_pages = bytes >> PAGE_SHIFT; + locked_pages += mm->locked_vm; + + limit_pages = rlimit(RLIMIT_MEMLOCK); + limit_pages >>= PAGE_SHIFT; + + return locked_pages <= limit_pages; } static inline u64 file_mmap_size_max(struct file *file, struct inode *inode) @@ -1271,7 +1272,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!can_do_mlock()) return -EPERM; - if (mlock_future_check(mm, vm_flags, len)) + if (!mlock_future_check(mm, vm_flags, len)) return -EAGAIN; if (file) { @@ -1889,7 +1890,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, return -ENOMEM; /* mlock limit tests */ - if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) + if (!mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ diff --git a/mm/mremap.c b/mm/mremap.c index b11ce6c92099..bcfcb8df5875 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -775,7 +775,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return ERR_PTR(-EFAULT); - if (mlock_future_check(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_check(mm, vma->vm_flags, new_len - old_len)) return ERR_PTR(-EAGAIN); if (!may_expand_vm(mm, vma->vm_flags, diff --git a/mm/secretmem.c b/mm/secretmem.c index 974b32ba8b9d..58d2af12df4f 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -125,7 +125,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma) if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; - if (mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) + if (!mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) return -EAGAIN; vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP); -- cgit v1.2.3 From b0cc5e89caadc21c2d8f7dc4c97947761b358876 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Mon, 22 May 2023 13:52:10 -0700 Subject: mm/mlock: rename mlock_future_check() to mlock_future_ok() It is felt that the name mlock_future_check() is vague - it doesn't particularly convey the function's operation. mlock_future_ok() is a clearer name for a predicate function. 
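
For illustration only, here is a minimal user-space sketch of the resulting predicate shape (bytes converted to pages, already-locked pages added, the sum compared against the limit). The SKETCH_* constants, the struct and the main() harness are stand-ins invented for this example, not the kernel's types:

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_PAGE_SHIFT 12      /* assume 4K pages for the example */
#define SKETCH_VM_LOCKED  0x1UL   /* stand-in for VM_LOCKED */

struct sketch_mm {
	unsigned long locked_vm;      /* pages already locked */
	unsigned long memlock_limit;  /* RLIMIT_MEMLOCK, in bytes */
	bool cap_ipc_lock;            /* stand-in for capable(CAP_IPC_LOCK) */
};

static bool sketch_mlock_future_ok(const struct sketch_mm *mm,
				   unsigned long flags, unsigned long bytes)
{
	unsigned long locked_pages, limit_pages;

	if (!(flags & SKETCH_VM_LOCKED) || mm->cap_ipc_lock)
		return true;

	locked_pages = bytes >> SKETCH_PAGE_SHIFT;
	locked_pages += mm->locked_vm;

	limit_pages = mm->memlock_limit >> SKETCH_PAGE_SHIFT;

	return locked_pages <= limit_pages;
}

int main(void)
{
	struct sketch_mm mm = { .locked_vm = 16, .memlock_limit = 1 << 20 };

	/* 16 pages locked + 64 new pages against a 256-page limit: ok */
	printf("%d\n", sketch_mlock_future_ok(&mm, SKETCH_VM_LOCKED, 64UL << 12));
	/* 1024 more pages would exceed the limit: not ok */
	printf("%d\n", sketch_mlock_future_ok(&mm, SKETCH_VM_LOCKED, 1024UL << 12));
	return 0;
}

Callers that still need an errno, such as check_brk_limits() in the hunk above, map the boolean to 0 or -EAGAIN at the call site.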
Acked-by: Vlastimil Babka Cc: Liam Howlett Cc: Lorenzo Stoakes Cc: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- mm/internal.h | 2 +- mm/mmap.c | 8 ++++---- mm/mremap.c | 2 +- mm/secretmem.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/internal.h b/mm/internal.h index 66dd214b302a..f45f5eb4514f 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -576,7 +576,7 @@ extern long populate_vma_page_range(struct vm_area_struct *vma, extern long faultin_vma_page_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, bool write, int *locked); -extern bool mlock_future_check(struct mm_struct *mm, unsigned long flags, +extern bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes); /* * mlock_vma_folio() and munlock_vma_folio(): diff --git a/mm/mmap.c b/mm/mmap.c index 28d2c489a7e5..e1624cb2c04e 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -182,7 +182,7 @@ static int check_brk_limits(unsigned long addr, unsigned long len) if (IS_ERR_VALUE(mapped_addr)) return mapped_addr; - return mlock_future_check(current->mm, current->mm->def_flags, len) + return mlock_future_ok(current->mm, current->mm->def_flags, len) ? 0 : -EAGAIN; } static int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma, @@ -1146,7 +1146,7 @@ static inline unsigned long round_hint_to_min(unsigned long hint) return hint; } -bool mlock_future_check(struct mm_struct *mm, unsigned long flags, +bool mlock_future_ok(struct mm_struct *mm, unsigned long flags, unsigned long bytes) { unsigned long locked_pages, limit_pages; @@ -1272,7 +1272,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr, if (!can_do_mlock()) return -EPERM; - if (!mlock_future_check(mm, vm_flags, len)) + if (!mlock_future_ok(mm, vm_flags, len)) return -EAGAIN; if (file) { @@ -1890,7 +1890,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, return -ENOMEM; /* mlock limit tests */ - if (!mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT)) + if (!mlock_future_ok(mm, vma->vm_flags, grow << PAGE_SHIFT)) return -ENOMEM; /* Check to ensure the stack will not grow into a hugetlb-only region */ diff --git a/mm/mremap.c b/mm/mremap.c index bcfcb8df5875..da107f2c71bf 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -775,7 +775,7 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr, if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) return ERR_PTR(-EFAULT); - if (!mlock_future_check(mm, vma->vm_flags, new_len - old_len)) + if (!mlock_future_ok(mm, vma->vm_flags, new_len - old_len)) return ERR_PTR(-EAGAIN); if (!may_expand_vm(mm, vma->vm_flags, diff --git a/mm/secretmem.c b/mm/secretmem.c index 58d2af12df4f..86442a15d12f 100644 --- a/mm/secretmem.c +++ b/mm/secretmem.c @@ -125,7 +125,7 @@ static int secretmem_mmap(struct file *file, struct vm_area_struct *vma) if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0) return -EINVAL; - if (!mlock_future_check(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) + if (!mlock_future_ok(vma->vm_mm, vma->vm_flags | VM_LOCKED, len)) return -EAGAIN; vm_flags_set(vma, VM_LOCKED | VM_DONTDUMP); -- cgit v1.2.3 From 08e0f49e9991b175f2cda7ba32a7e9f1320dcbad Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 22 May 2023 09:52:32 +0000 Subject: mm/memcontrol: fix typo in comment Replace 'then' with 'than'. 
Link: https://lkml.kernel.org/r/20230522095233.4246-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Cc: Johannes Weiner Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d31fb1e2cb33..7c681492b47b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6862,7 +6862,7 @@ static unsigned long effective_protection(unsigned long usage, protected = min(usage, setting); /* * If all cgroups at this level combined claim and use more - * protection then what the parent affords them, distribute + * protection than what the parent affords them, distribute * shares in proportion to utilization. * * We are using actual utilization rather than the statically -- cgit v1.2.3 From 0285762c6f161c3a93ffc75ba278aad21719460a Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:55 +0000 Subject: mm: multi-gen LRU: use macro for bitmap Use DECLARE_BITMAP macro when possible. Link: https://lkml.kernel.org/r/20230522112058.2965866-1-talumbau@google.com Signed-off-by: T.J. Alumbaugh Reviewed-by: David Hildenbrand Reviewed-by: Yuanchu Xie Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index df7e52b522ec..cafb933d609f 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4144,7 +4144,7 @@ static void walk_pmd_range(pud_t *pud, unsigned long start, unsigned long end, unsigned long next; unsigned long addr; struct vm_area_struct *vma; - unsigned long bitmap[BITS_TO_LONGS(MIN_LRU_BATCH)]; + DECLARE_BITMAP(bitmap, MIN_LRU_BATCH); unsigned long first = -1; struct lru_gen_mm_walk *walk = args->private; -- cgit v1.2.3 From 5c7e7a0d79072eb02780a2c0dee730b23cde711d Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:56 +0000 Subject: mm: multi-gen LRU: cleanup lru_gen_soft_reclaim() lru_gen_soft_reclaim() gets the lruvec from the memcg and node ID to keep a cleaner interface on the caller side. Link: https://lkml.kernel.org/r/20230522112058.2965866-2-talumbau@google.com Signed-off-by: T.J. 
Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 4 ++-- mm/memcontrol.c | 2 +- mm/vmscan.c | 4 +++- 3 files changed, 6 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3a68326c9989..5a7ada0413da 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -534,7 +534,7 @@ void lru_gen_exit_memcg(struct mem_cgroup *memcg); void lru_gen_online_memcg(struct mem_cgroup *memcg); void lru_gen_offline_memcg(struct mem_cgroup *memcg); void lru_gen_release_memcg(struct mem_cgroup *memcg); -void lru_gen_soft_reclaim(struct lruvec *lruvec); +void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid); #else /* !CONFIG_MEMCG */ @@ -585,7 +585,7 @@ static inline void lru_gen_release_memcg(struct mem_cgroup *memcg) { } -static inline void lru_gen_soft_reclaim(struct lruvec *lruvec) +static inline void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 7c681492b47b..6a3d4ce87b8a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -485,7 +485,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, int nid) if (lru_gen_enabled()) { if (soft_limit_excess(memcg)) - lru_gen_soft_reclaim(&memcg->nodeinfo[nid]->lruvec); + lru_gen_soft_reclaim(memcg, nid); return; } diff --git a/mm/vmscan.c b/mm/vmscan.c index cafb933d609f..a51a7e0f8b63 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4846,8 +4846,10 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) } } -void lru_gen_soft_reclaim(struct lruvec *lruvec) +void lru_gen_soft_reclaim(struct mem_cgroup *memcg, int nid) { + struct lruvec *lruvec = get_lruvec(memcg, nid); + /* see the comment on MEMCG_NR_GENS */ if (lru_gen_memcg_seg(lruvec) != MEMCG_LRU_HEAD) lru_gen_rotate_memcg(lruvec, MEMCG_LRU_HEAD); -- cgit v1.2.3 From bd02df412cbb9a63e945a647e3dbe4d6f9e06d19 Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:57 +0000 Subject: mm: multi-gen LRU: add helpers in page table walks Add helpers to page table walking code: - Clarifies intent via name "should_walk_mmu" and "should_clear_pmd_young" - Avoids repeating same logic in two places Link: https://lkml.kernel.org/r/20230522112058.2965866-3-talumbau@google.com Signed-off-by: T.J. 
Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmscan.c | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index a51a7e0f8b63..dbbfcc631f5c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3234,6 +3234,16 @@ DEFINE_STATIC_KEY_ARRAY_FALSE(lru_gen_caps, NR_LRU_GEN_CAPS); #define get_cap(cap) static_branch_unlikely(&lru_gen_caps[cap]) #endif +static bool should_walk_mmu(void) +{ + return arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK); +} + +static bool should_clear_pmd_young(void) +{ + return arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG); +} + /****************************************************************************** * shorthand helpers ******************************************************************************/ @@ -4098,7 +4108,7 @@ static void walk_pmd_range_locked(pud_t *pud, unsigned long addr, struct vm_area goto next; if (!pmd_trans_huge(pmd[i])) { - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) + if (should_clear_pmd_young()) pmdp_test_and_clear_young(vma, addr, pmd + i); goto next; } @@ -4191,7 +4201,7 @@ restart: #endif walk->mm_stats[MM_NONLEAF_TOTAL]++; - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) { + if (should_clear_pmd_young()) { if (!pmd_young(val)) continue; @@ -4493,7 +4503,7 @@ static bool try_to_inc_max_seq(struct lruvec *lruvec, unsigned long max_seq, * handful of PTEs. Spreading the work out over a period of time usually * is less efficient, but it avoids bursty page faults. */ - if (!arch_has_hw_pte_young() || !get_cap(LRU_GEN_MM_WALK)) { + if (!should_walk_mmu()) { success = iterate_mm_list_nowalk(lruvec, max_seq); goto done; } @@ -5730,10 +5740,10 @@ static ssize_t enabled_show(struct kobject *kobj, struct kobj_attribute *attr, c if (get_cap(LRU_GEN_CORE)) caps |= BIT(LRU_GEN_CORE); - if (arch_has_hw_pte_young() && get_cap(LRU_GEN_MM_WALK)) + if (should_walk_mmu()) caps |= BIT(LRU_GEN_MM_WALK); - if (arch_has_hw_nonleaf_pmd_young() && get_cap(LRU_GEN_NONLEAF_YOUNG)) + if (should_clear_pmd_young()) caps |= BIT(LRU_GEN_NONLEAF_YOUNG); return sysfs_emit(buf, "0x%04x\n", caps); -- cgit v1.2.3 From d7f1afd0e3ac4c5c703b67d6003b62f760e95ba8 Mon Sep 17 00:00:00 2001 From: "T.J. Alumbaugh" Date: Mon, 22 May 2023 11:20:58 +0000 Subject: mm: multi-gen LRU: cleanup lru_gen_test_recent() Avoid passing memcg* and pglist_data* to lru_gen_test_recent() since we only use the lruvec anyway. Link: https://lkml.kernel.org/r/20230522112058.2965866-4-talumbau@google.com Signed-off-by: T.J. Alumbaugh Reviewed-by: Yuanchu Xie Cc: David Hildenbrand Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/workingset.c | 46 ++++++++++++++++------------------------------ 1 file changed, 16 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/mm/workingset.c b/mm/workingset.c index 90ae785d4c9c..5796e927e6d7 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -257,59 +257,46 @@ static void *lru_gen_eviction(struct folio *folio) /* * Tests if the shadow entry is for a folio that was recently evicted. - * Fills in @memcgid, @pglist_data, @token, @workingset with the values - * unpacked from shadow. + * Fills in @lruvec, @token, @workingset with the values unpacked from shadow. 
*/ -static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, - struct pglist_data **pgdat, unsigned long *token, bool *workingset) +static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, + unsigned long *token, bool *workingset) { - struct mem_cgroup *eviction_memcg; - struct lruvec *lruvec; - struct lru_gen_folio *lrugen; + int memcg_id; unsigned long min_seq; + struct mem_cgroup *memcg; + struct pglist_data *pgdat; - unpack_shadow(shadow, memcgid, pgdat, token, workingset); - eviction_memcg = mem_cgroup_from_id(*memcgid); + unpack_shadow(shadow, &memcg_id, &pgdat, token, workingset); - lruvec = mem_cgroup_lruvec(eviction_memcg, *pgdat); - lrugen = &lruvec->lrugen; + memcg = mem_cgroup_from_id(memcg_id); + *lruvec = mem_cgroup_lruvec(memcg, pgdat); - min_seq = READ_ONCE(lrugen->min_seq[file]); + min_seq = READ_ONCE((*lruvec)->lrugen.min_seq[file]); return (*token >> LRU_REFS_WIDTH) == (min_seq & (EVICTION_MASK >> LRU_REFS_WIDTH)); } static void lru_gen_refault(struct folio *folio, void *shadow) { int hist, tier, refs; - int memcg_id; bool workingset; unsigned long token; - unsigned long min_seq; struct lruvec *lruvec; struct lru_gen_folio *lrugen; - struct mem_cgroup *memcg; - struct pglist_data *pgdat; int type = folio_is_file_lru(folio); int delta = folio_nr_pages(folio); rcu_read_lock(); - if (!lru_gen_test_recent(shadow, type, &memcg_id, &pgdat, &token, - &workingset)) - goto unlock; - - memcg = folio_memcg_rcu(folio); - if (memcg_id != mem_cgroup_id(memcg)) + if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset)) goto unlock; - if (pgdat != folio_pgdat(folio)) + if (lruvec != folio_lruvec(folio)) goto unlock; - lruvec = mem_cgroup_lruvec(memcg, pgdat); lrugen = &lruvec->lrugen; - min_seq = READ_ONCE(lrugen->min_seq[type]); - hist = lru_hist_from_seq(min_seq); + hist = lru_hist_from_seq(READ_ONCE(lrugen->min_seq[type])); /* see the comment in folio_lru_refs() */ refs = (token & (BIT(LRU_REFS_WIDTH) - 1)) + workingset; tier = lru_tier_from_refs(refs); @@ -339,8 +326,8 @@ static void *lru_gen_eviction(struct folio *folio) return NULL; } -static bool lru_gen_test_recent(void *shadow, bool file, int *memcgid, - struct pglist_data **pgdat, unsigned long *token, bool *workingset) +static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, + unsigned long *token, bool *workingset) { return false; } @@ -435,8 +422,7 @@ bool workingset_test_recent(void *shadow, bool file, bool *workingset) unsigned long eviction; if (lru_gen_enabled()) - return lru_gen_test_recent(shadow, file, &memcgid, &pgdat, &eviction, - workingset); + return lru_gen_test_recent(shadow, file, &eviction_lruvec, &eviction, workingset); unpack_shadow(shadow, &memcgid, &pgdat, &eviction, workingset); eviction <<= bucket_order; -- cgit v1.2.3 From fc1e0d980037e065441cd1d9a1a5e9c9117e4ba2 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:03 +0200 Subject: mm/vmalloc: prevent stale TLBs in fully utilized blocks Patch series "mm/vmalloc: Assorted fixes and improvements", v2. this series addresses the following issues: 1) Prevent the stale TLB problem related to fully utilized vmap blocks 2) Avoid the double per CPU list walk in _vm_unmap_aliases() 3) Avoid flushing dirty space over and over 4) Add a lockless quickcheck in vb_alloc() and add missing READ/WRITE_ONCE() annotations 5) Prevent overeager purging of usable vmap_blocks if not under memory/address space pressure. 
This patch (of 6): _vm_unmap_aliases() is used to ensure that no unflushed TLB entries for a page are left in the system. This is required due to the lazy TLB flush mechanism in vmalloc. This is tried to achieve by walking the per CPU free lists, but those do not contain fully utilized vmap blocks because they are removed from the free list once the blocks free space became zero. When the block is not fully unmapped then it is not on the purge list either. So neither the per CPU list iteration nor the purge list walk find the block and if the page was mapped via such a block and the TLB has not yet been flushed, the guarantee of _vm_unmap_aliases() that there are no stale TLBs after returning is broken: x = vb_alloc() // Removes vmap_block from free list because vb->free became 0 vb_free(x) // Unmaps page and marks in dirty_min/max range // Block has still mappings and is not put on purge list // Page is reused vm_unmap_aliases() // Can't find vmap block with the dirty space -> FAIL So instead of walking the per CPU free lists, walk the per CPU xarrays which hold pointers to _all_ active blocks in the system including those removed from the free lists. Link: https://lkml.kernel.org/r/20230525122342.109672430@linutronix.de Link: https://lkml.kernel.org/r/20230525124504.573987880@linutronix.de Fixes: db64fe02258f ("mm: rewrite vmap layer") Signed-off-by: Thomas Gleixner Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Baoquan He Signed-off-by: Andrew Morton --- mm/vmalloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9683573f1225..ac80369eb37a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2236,9 +2236,10 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); struct vmap_block *vb; + unsigned long idx; rcu_read_lock(); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { + xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; -- cgit v1.2.3 From ca5e46c3400badc418a8fbcaeba711ad60ff4e1b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:04 +0200 Subject: mm/vmalloc: avoid iterating over per CPU vmap blocks twice _vunmap_aliases() walks the per CPU xarrays to find partially unmapped blocks and then walks the per cpu free lists to purge fragmented blocks. Arguably that's waste of CPU cycles and cache lines as the full xarray walk already touches every block. Avoid this double iteration: - Split out the code to purge one block and the code to free the local purge list into helper functions. - Try to purge the fragmented blocks in the xarray walk before looking at their dirty space. 
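
Both patches rest on the same observation: only the per-CPU xarray sees every active vmap block, while the free list stops seeing a block once its free space hits zero. The following toy program (plain arrays standing in for the xarray and the free list, nothing here is the kernel's data structure) shows how a free-list-only walk misses a fully used but still dirty block:

#include <stdbool.h>
#include <stdio.h>

#define NBLOCKS 3

struct toy_block {
	int free;          /* free slots left in the block */
	bool dirty;        /* has lazily unmapped, unflushed space */
	bool on_free_list; /* blocks leave the free list once free == 0 */
};

int main(void)
{
	/* Block 1 is fully allocated (free == 0) but still dirty. */
	struct toy_block blocks[NBLOCKS] = {
		{ .free = 4, .dirty = false, .on_free_list = true  },
		{ .free = 0, .dirty = true,  .on_free_list = false },
		{ .free = 2, .dirty = true,  .on_free_list = true  },
	};
	int flushed_free_list = 0, flushed_all = 0;

	/* Old scheme: only blocks still on the free list are visited. */
	for (int i = 0; i < NBLOCKS; i++)
		if (blocks[i].on_free_list && blocks[i].dirty)
			flushed_free_list++;

	/* Fixed scheme: visit every active block, xarray style. */
	for (int i = 0; i < NBLOCKS; i++)
		if (blocks[i].dirty)
			flushed_all++;

	printf("free-list walk flushes %d block(s), full walk flushes %d\n",
	       flushed_free_list, flushed_all);
	return 0;
}

The dirty block that only the full walk reaches is exactly the one whose stale TLB entries the old code could leave behind.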
Link: https://lkml.kernel.org/r/20230525124504.633469722@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Christoph Hellwig Reviewed-by: Baoquan He Cc: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 70 +++++++++++++++++++++++++++++++++++++++--------------------- 1 file changed, 46 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ac80369eb37a..eaef5e0400db 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2086,39 +2086,54 @@ static void free_vmap_block(struct vmap_block *vb) kfree_rcu(vb, rcu_head); } +static bool purge_fragmented_block(struct vmap_block *vb, + struct vmap_block_queue *vbq, struct list_head *purge_list) +{ + if (vb->free + vb->dirty != VMAP_BBMAP_BITS || + vb->dirty == VMAP_BBMAP_BITS) + return false; + + /* prevent further allocs after releasing lock */ + vb->free = 0; + /* prevent purging it again */ + vb->dirty = VMAP_BBMAP_BITS; + vb->dirty_min = 0; + vb->dirty_max = VMAP_BBMAP_BITS; + spin_lock(&vbq->lock); + list_del_rcu(&vb->free_list); + spin_unlock(&vbq->lock); + list_add_tail(&vb->purge, purge_list); + return true; +} + +static void free_purged_blocks(struct list_head *purge_list) +{ + struct vmap_block *vb, *n_vb; + + list_for_each_entry_safe(vb, n_vb, purge_list, purge) { + list_del(&vb->purge); + free_vmap_block(vb); + } +} + static void purge_fragmented_blocks(int cpu) { LIST_HEAD(purge); struct vmap_block *vb; - struct vmap_block *n_vb; struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { - - if (!(vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS)) + if (vb->free + vb->dirty != VMAP_BBMAP_BITS || + vb->dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); - if (vb->free + vb->dirty == VMAP_BBMAP_BITS && vb->dirty != VMAP_BBMAP_BITS) { - vb->free = 0; /* prevent further allocs after releasing lock */ - vb->dirty = VMAP_BBMAP_BITS; /* prevent purging it again */ - vb->dirty_min = 0; - vb->dirty_max = VMAP_BBMAP_BITS; - spin_lock(&vbq->lock); - list_del_rcu(&vb->free_list); - spin_unlock(&vbq->lock); - spin_unlock(&vb->lock); - list_add_tail(&vb->purge, &purge); - } else - spin_unlock(&vb->lock); + purge_fragmented_block(vb, vbq, &purge); + spin_unlock(&vb->lock); } rcu_read_unlock(); - - list_for_each_entry_safe(vb, n_vb, &purge, purge) { - list_del(&vb->purge); - free_vmap_block(vb); - } + free_purged_blocks(&purge); } static void purge_fragmented_blocks_allcpus(void) @@ -2226,12 +2241,13 @@ static void vb_free(unsigned long addr, unsigned long size) static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) { + LIST_HEAD(purge_list); int cpu; if (unlikely(!vmap_initialized)) return; - might_sleep(); + mutex_lock(&vmap_purge_lock); for_each_possible_cpu(cpu) { struct vmap_block_queue *vbq = &per_cpu(vmap_block_queue, cpu); @@ -2241,7 +2257,14 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) rcu_read_lock(); xa_for_each(&vbq->vmap_blocks, idx, vb) { spin_lock(&vb->lock); - if (vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { + + /* + * Try to purge a fragmented block first. If it's + * not purgeable, check whether there is dirty + * space to be flushed. 
+ */ + if (!purge_fragmented_block(vb, vbq, &purge_list) && + vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -2257,9 +2280,8 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) } rcu_read_unlock(); } + free_purged_blocks(&purge_list); - mutex_lock(&vmap_purge_lock); - purge_fragmented_blocks_allcpus(); if (!__purge_vmap_area_lazy(start, end) && flush) flush_tlb_kernel_range(start, end); mutex_unlock(&vmap_purge_lock); -- cgit v1.2.3 From a09fad96ffb1d0da007283727235a03b813f989b Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:05 +0200 Subject: mm/vmalloc: prevent flushing dirty space over and over vmap blocks which have active mappings cannot be purged. Allocations which have been freed are accounted for in vmap_block::dirty_min/max, so that they can be detected in _vm_unmap_aliases() as potentially stale TLBs. If there are several invocations of _vm_unmap_aliases() then each of them will flush the dirty range. That's pointless and just increases the probability of full TLB flushes. Avoid that by resetting the flush range after accounting for it. That's safe versus other invocations of _vm_unmap_aliases() because this is all serialized with vmap_purge_lock. Link: https://lkml.kernel.org/r/20230525124504.692056496@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Baoquan He Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index eaef5e0400db..ad9a1d9e314f 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2226,7 +2226,7 @@ static void vb_free(unsigned long addr, unsigned long size) spin_lock(&vb->lock); - /* Expand dirty range */ + /* Expand the not yet TLB flushed dirty range */ vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); @@ -2264,7 +2264,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) * space to be flushed. */ if (!purge_fragmented_block(vb, vbq, &purge_list) && - vb->dirty && vb->dirty != VMAP_BBMAP_BITS) { + vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -2274,6 +2274,10 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) start = min(s, start); end = max(e, end); + /* Prevent that this is flushed again */ + vb->dirty_min = VMAP_BBMAP_BITS; + vb->dirty_max = 0; + flush = 1; } spin_unlock(&vb->lock); -- cgit v1.2.3 From 43d7650234c62201ba3ca5b731226b0b189989a8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:07 +0200 Subject: mm/vmalloc: check free space in vmap_block lockless vb_alloc() unconditionally locks a vmap_block on the free list to check the free space. This can be done locklessly because vmap_block::free never increases, it's only decreased on allocations. Check the free space lockless and only if that succeeds, recheck under the lock. 
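
The pattern is a cheap, racy read that filters out blocks which obviously cannot satisfy the request, followed by the authoritative check under the lock. A stand-alone C11 sketch of that shape (a pthread mutex and a C11 atomic stand in for the kernel's spinlock and READ_ONCE(); the names are invented for this example):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_block {
	pthread_mutex_t lock;
	atomic_ulong free;   /* only ever decreases while the block is live */
};

/* Try to take 'want' units from the block. */
static bool toy_alloc(struct toy_block *b, unsigned long want)
{
	/*
	 * Lockless quick check: because 'free' never increases, a stale
	 * value can only be too large, so a failing check here is safe.
	 */
	if (atomic_load_explicit(&b->free, memory_order_relaxed) < want)
		return false;

	pthread_mutex_lock(&b->lock);
	/* Recheck under the lock before committing the allocation. */
	if (atomic_load_explicit(&b->free, memory_order_relaxed) < want) {
		pthread_mutex_unlock(&b->lock);
		return false;
	}
	atomic_fetch_sub_explicit(&b->free, want, memory_order_relaxed);
	pthread_mutex_unlock(&b->lock);
	return true;
}

int main(void)
{
	struct toy_block b = { PTHREAD_MUTEX_INITIALIZER, 8 };

	printf("first:  %d\n", toy_alloc(&b, 6));  /* succeeds, 2 units left */
	printf("second: %d\n", toy_alloc(&b, 6));  /* rejected by the quick check */
	return 0;
}

The unlocked read is only safe because vmap_block::free never increases for a live block, which is the property the changelog calls out.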
Link: https://lkml.kernel.org/r/20230525124504.750481992@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Christoph Hellwig Reviewed-by: Lorenzo Stoakes Reviewed-by: Baoquan He Signed-off-by: Andrew Morton --- mm/vmalloc.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index ad9a1d9e314f..679112e2ffd2 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2168,6 +2168,9 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) list_for_each_entry_rcu(vb, &vbq->free, free_list) { unsigned long pages_off; + if (READ_ONCE(vb->free) < (1UL << order)) + continue; + spin_lock(&vb->lock); if (vb->free < (1UL << order)) { spin_unlock(&vb->lock); @@ -2176,7 +2179,7 @@ static void *vb_alloc(unsigned long size, gfp_t gfp_mask) pages_off = VMAP_BBMAP_BITS - vb->free; vaddr = vmap_block_vaddr(vb->va->va_start, pages_off); - vb->free -= 1UL << order; + WRITE_ONCE(vb->free, vb->free - (1UL << order)); bitmap_set(vb->used_map, pages_off, (1UL << order)); if (vb->free == 0) { spin_lock(&vbq->lock); -- cgit v1.2.3 From 7f48121e9fa82bdaf0bd0f7a7e49f48803c6c0e8 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:08 +0200 Subject: mm/vmalloc: add missing READ/WRITE_ONCE() annotations purge_fragmented_blocks() accesses vmap_block::free and vmap_block::dirty lockless for a quick check. Add the missing READ/WRITE_ONCE() annotations. Link: https://lkml.kernel.org/r/20230525124504.807356682@linutronix.de Signed-off-by: Thomas Gleixner Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Christoph Hellwig Reviewed-by: Baoquan He Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- mm/vmalloc.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 679112e2ffd2..f643c7286c7a 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2094,9 +2094,9 @@ static bool purge_fragmented_block(struct vmap_block *vb, return false; /* prevent further allocs after releasing lock */ - vb->free = 0; + WRITE_ONCE(vb->free, 0); /* prevent purging it again */ - vb->dirty = VMAP_BBMAP_BITS; + WRITE_ONCE(vb->dirty, VMAP_BBMAP_BITS); vb->dirty_min = 0; vb->dirty_max = VMAP_BBMAP_BITS; spin_lock(&vbq->lock); @@ -2124,8 +2124,11 @@ static void purge_fragmented_blocks(int cpu) rcu_read_lock(); list_for_each_entry_rcu(vb, &vbq->free, free_list) { - if (vb->free + vb->dirty != VMAP_BBMAP_BITS || - vb->dirty == VMAP_BBMAP_BITS) + unsigned long free = READ_ONCE(vb->free); + unsigned long dirty = READ_ONCE(vb->dirty); + + if (free + dirty != VMAP_BBMAP_BITS || + dirty == VMAP_BBMAP_BITS) continue; spin_lock(&vb->lock); @@ -2233,7 +2236,7 @@ static void vb_free(unsigned long addr, unsigned long size) vb->dirty_min = min(vb->dirty_min, offset); vb->dirty_max = max(vb->dirty_max, offset + (1UL << order)); - vb->dirty += 1UL << order; + WRITE_ONCE(vb->dirty, vb->dirty + (1UL << order)); if (vb->dirty == VMAP_BBMAP_BITS) { BUG_ON(vb->free); spin_unlock(&vb->lock); -- cgit v1.2.3 From 77e50af07f14ea7b53f82f9417ddf2fd96c78da3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 25 May 2023 14:57:09 +0200 Subject: mm/vmalloc: dont purge usable blocks unnecessarily Purging fragmented blocks is done unconditionally in several contexts: 1) From drain_vmap_area_work(), when the number of lazy to be freed vmap_areas reached the threshold 2) Reclaiming vmalloc address space from pcpu_get_vm_areas() 3) _vm_unmap_aliases() #1 There is no reason to zap 
fragmented vmap blocks unconditionally, simply because reclaiming all lazy areas drains at least 32MB * fls(num_online_cpus()) per invocation which is plenty. #2 Reclaiming when running out of space or due to memory pressure makes a lot of sense #3 _unmap_aliases() requires to touch everything because the caller has no clue which vmap_area used a particular page last and the vmap_area lost that information too. Except for the vfree + VM_FLUSH_RESET_PERMS case, which removes the vmap area first and then cares about the flush. That in turn requires a full walk of _all_ vmap areas including the one which was just added to the purge list. But as this has to be flushed anyway this is an opportunity to combine outstanding TLB flushes and do the housekeeping of purging freed areas, but like #1 there is no real good reason to zap usable vmap blocks unconditionally. Add a @force_purge argument to the newly split out block purge function and if not true only purge fragmented blocks which have less than 1/4 of their capacity left. Rename purge_vmap_area_lazy() to reclaim_and_purge_vmap_areas() to make it clear what the function does. [lstoakes@gmail.com: correct VMAP_PURGE_THRESHOLD check] Link: https://lkml.kernel.org/r/3e92ef61-b910-4576-88e7-cf43211fd4e7@lucifer.local Link: https://lkml.kernel.org/r/20230525124504.864005691@linutronix.de Signed-off-by: Thomas Gleixner Signed-off-by: Lorenzo Stoakes Reviewed-by: Baoquan He Cc: Christoph Hellwig Cc: Lorenzo Stoakes Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index f643c7286c7a..7c32435219b1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -791,7 +791,7 @@ get_subtree_max_size(struct rb_node *node) RB_DECLARE_CALLBACKS_MAX(static, free_vmap_area_rb_augment_cb, struct vmap_area, rb_node, unsigned long, subtree_max_size, va_size) -static void purge_vmap_area_lazy(void); +static void reclaim_and_purge_vmap_areas(void); static BLOCKING_NOTIFIER_HEAD(vmap_notify_list); static void drain_vmap_area_work(struct work_struct *work); static DECLARE_WORK(drain_vmap_work, drain_vmap_area_work); @@ -1649,7 +1649,7 @@ retry: overflow: if (!purged) { - purge_vmap_area_lazy(); + reclaim_and_purge_vmap_areas(); purged = 1; goto retry; } @@ -1785,9 +1785,10 @@ out: } /* - * Kick off a purge of the outstanding lazy areas. + * Reclaim vmap areas by purging fragmented blocks and purge_vmap_area_list. */ -static void purge_vmap_area_lazy(void) +static void reclaim_and_purge_vmap_areas(void) + { mutex_lock(&vmap_purge_lock); purge_fragmented_blocks_allcpus(); @@ -1908,6 +1909,12 @@ static struct vmap_area *find_unlink_vmap_area(unsigned long addr) #define VMAP_BLOCK_SIZE (VMAP_BBMAP_BITS * PAGE_SIZE) +/* + * Purge threshold to prevent overeager purging of fragmented blocks for + * regular operations: Purge if vb->free is less than 1/4 of the capacity. 
+ */ +#define VMAP_PURGE_THRESHOLD (VMAP_BBMAP_BITS / 4) + #define VMAP_RAM 0x1 /* indicates vm_map_ram area*/ #define VMAP_BLOCK 0x2 /* mark out the vmap_block sub-type*/ #define VMAP_FLAGS_MASK 0x3 @@ -2087,12 +2094,17 @@ static void free_vmap_block(struct vmap_block *vb) } static bool purge_fragmented_block(struct vmap_block *vb, - struct vmap_block_queue *vbq, struct list_head *purge_list) + struct vmap_block_queue *vbq, struct list_head *purge_list, + bool force_purge) { if (vb->free + vb->dirty != VMAP_BBMAP_BITS || vb->dirty == VMAP_BBMAP_BITS) return false; + /* Don't overeagerly purge usable blocks unless requested */ + if (!(force_purge || vb->free < VMAP_PURGE_THRESHOLD)) + return false; + /* prevent further allocs after releasing lock */ WRITE_ONCE(vb->free, 0); /* prevent purging it again */ @@ -2132,7 +2144,7 @@ static void purge_fragmented_blocks(int cpu) continue; spin_lock(&vb->lock); - purge_fragmented_block(vb, vbq, &purge); + purge_fragmented_block(vb, vbq, &purge, true); spin_unlock(&vb->lock); } rcu_read_unlock(); @@ -2269,7 +2281,7 @@ static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush) * not purgeable, check whether there is dirty * space to be flushed. */ - if (!purge_fragmented_block(vb, vbq, &purge_list) && + if (!purge_fragmented_block(vb, vbq, &purge_list, false) && vb->dirty_max && vb->dirty != VMAP_BBMAP_BITS) { unsigned long va_start = vb->va->va_start; unsigned long s, e; @@ -4175,7 +4187,7 @@ recovery: overflow: spin_unlock(&free_vmap_area_lock); if (!purged) { - purge_vmap_area_lazy(); + reclaim_and_purge_vmap_areas(); purged = true; /* Before "retry", check if we recover. */ -- cgit v1.2.3 From 75990f6459b9cf61a94e8a08d0f6a4aa0b8cf3b5 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:56 +0800 Subject: mm: compaction: drop the redundant page validation in update_pageblock_skip() Patch series "Misc cleanups and improvements for compaction". This series cantains some cleanups and improvements for compaction. This patch (of 6): The caller has validated the page before calling update_pageblock_skip(), thus drop the redundant page validation in update_pageblock_skip(). Link: https://lkml.kernel.org/r/5142e15b9295fe8c447dbb39b7907a20177a1413.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 261071a07681..83004c15715a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -436,9 +436,6 @@ static void update_pageblock_skip(struct compact_control *cc, if (cc->no_set_skip_hint) return; - if (!page) - return; - set_pageblock_skip(page); /* Update where async and sync compaction should restart */ -- cgit v1.2.3 From 2dbd90054f965c899b9adb62b2d0d215f687d04b Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:57 +0800 Subject: mm: compaction: change fast_isolate_freepages() to void type No caller cares about the return value of fast_isolate_freepages(), void it. 
Link: https://lkml.kernel.org/r/759fca20b22ebf4c81afa30496837b9e0fb2e53b.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 83004c15715a..d4cee5803214 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1413,8 +1413,7 @@ static int next_search_order(struct compact_control *cc, int order) return order; } -static unsigned long -fast_isolate_freepages(struct compact_control *cc) +static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); unsigned int nr_scanned = 0; @@ -1427,7 +1426,7 @@ fast_isolate_freepages(struct compact_control *cc) /* Full compaction passes in a negative order */ if (cc->order <= 0) - return cc->free_pfn; + return; /* * If starting the scan, use a deeper search and use the highest @@ -1566,11 +1565,10 @@ fast_isolate_freepages(struct compact_control *cc) cc->total_free_scanned += nr_scanned; if (!page) - return cc->free_pfn; + return; low_pfn = page_to_pfn(page); fast_isolate_around(cc, low_pfn); - return low_pfn; } /* -- cgit v1.2.3 From cf650342f83ae655c6d05a1a74ae1672459973d0 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:58 +0800 Subject: mm: compaction: skip more fully scanned pageblock In fast_isolate_around(), it assumes the pageblock is fully scanned if cc->nr_freepages < cc->nr_migratepages after trying to isolate some free pages, and will set skip flag to avoid scanning in future. However this can miss setting the skip flag for a fully scanned pageblock (returned 'start_pfn' is equal to 'end_pfn') in the case where cc->nr_freepages is larger than cc->nr_migratepages. So using the returned 'start_pfn' from isolate_freepages_block() and 'end_pfn' to decide if a pageblock is fully scanned makes more sense. It can also cover the case where cc->nr_freepages < cc->nr_migratepages, which means the 'start_pfn' is usually equal to 'end_pfn' except some uncommon fatal error occurs after non-strict mode isolation. Link: https://lkml.kernel.org/r/f4efd2fa08735794a6d809da3249b6715ba6ad38.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index d4cee5803214..c5f97bfd629c 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1389,7 +1389,7 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn) isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false); /* Skip this pageblock in the future as it's full or nearly full */ - if (cc->nr_freepages < cc->nr_migratepages) + if (start_pfn == end_pfn) set_pageblock_skip(page); return; -- cgit v1.2.3 From 8b71b499ff98fdcda7efefc146841a8b4d26813d Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:53:59 +0800 Subject: mm: compaction: only set skip flag if cc->no_set_skip_hint is false To keep the same logic as test_and_set_skip(), only set the skip flag if cc->no_set_skip_hint is false, which makes code more reasonable. 
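
Both of these cleanups concern when the caller may record a "skip this pageblock later" hint: only when skip hints are enabled at all, and only when the scan cursor actually reached the end of the block. A small stand-alone sketch of that decision (toy scanner, invented names):

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy scanner: advances *start toward end, stopping once 'budget' items
 * have been taken. Returns the number of items isolated.
 */
static int toy_scan_block(unsigned long *start, unsigned long end, int budget)
{
	int taken = 0;

	while (*start < end && taken < budget) {
		(*start)++;
		taken++;
	}
	return taken;
}

int main(void)
{
	bool no_set_skip_hint = false;   /* mirrors cc->no_set_skip_hint */
	unsigned long start = 0, end = 16;
	int taken = toy_scan_block(&start, end, 32);

	/*
	 * "Fully scanned" means the cursor reached 'end', not that fewer
	 * items were taken than some separate demand, and the hint is
	 * only recorded when hints are enabled.
	 */
	if (!no_set_skip_hint && start == end)
		printf("mark pageblock skip (isolated %d)\n", taken);
	else
		printf("pageblock not fully scanned, no skip hint\n");
	return 0;
}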
Link: https://lkml.kernel.org/r/0eb2cd2407ffb259ae6e3071e10f70f2d41d0f3e.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index c5f97bfd629c..3b09d8d02581 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1201,7 +1201,7 @@ isolate_abort: * rescanned twice in a row. */ if (low_pfn == end_pfn && (!nr_isolated || cc->finish_pageblock)) { - if (valid_page && !skip_updated) + if (!cc->no_set_skip_hint && valid_page && !skip_updated) set_pageblock_skip(valid_page); update_cached_migrate(cc, low_pfn); } -- cgit v1.2.3 From 447ba88658faa8dbfd29d557daa38b7d88f460ec Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:54:00 +0800 Subject: mm: compaction: add trace event for fast freepages isolation The fast_isolate_freepages() can also isolate freepages, but we can not know the fast isolation efficiency to understand the fast isolation pressure. So add a trace event to show some numbers to help to understand the efficiency for fast freepages isolation. Link: https://lkml.kernel.org/r/78d2932d0160d122c15372aceb3f2c45460a17fc.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Signed-off-by: Andrew Morton --- include/trace/events/compaction.h | 11 +++++++++++ mm/compaction.c | 6 +++++- 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h index 3313eb83c117..2b2a975efd20 100644 --- a/include/trace/events/compaction.h +++ b/include/trace/events/compaction.h @@ -64,6 +64,17 @@ DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_isolate_freepages, TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) ); +DEFINE_EVENT(mm_compaction_isolate_template, mm_compaction_fast_isolate_freepages, + + TP_PROTO( + unsigned long start_pfn, + unsigned long end_pfn, + unsigned long nr_scanned, + unsigned long nr_taken), + + TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken) +); + #ifdef CONFIG_COMPACTION TRACE_EVENT(mm_compaction_migratepages, diff --git a/mm/compaction.c b/mm/compaction.c index 3b09d8d02581..ce6293bf9c4a 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1416,7 +1416,7 @@ static int next_search_order(struct compact_control *cc, int order) static void fast_isolate_freepages(struct compact_control *cc) { unsigned int limit = max(1U, freelist_scan_limit(cc) >> 1); - unsigned int nr_scanned = 0; + unsigned int nr_scanned = 0, total_isolated = 0; unsigned long low_pfn, min_pfn, highest = 0; unsigned long nr_isolated = 0; unsigned long distance; @@ -1515,6 +1515,7 @@ static void fast_isolate_freepages(struct compact_control *cc) set_page_private(page, order); nr_isolated = 1 << order; nr_scanned += nr_isolated - 1; + total_isolated += nr_isolated; cc->nr_freepages += nr_isolated; list_add_tail(&page->lru, &cc->freepages); count_compact_events(COMPACTISOLATED, nr_isolated); @@ -1535,6 +1536,9 @@ static void fast_isolate_freepages(struct compact_control *cc) limit = max(1U, limit >> 1); } + trace_mm_compaction_fast_isolate_freepages(min_pfn, cc->free_pfn, + nr_scanned, total_isolated); + if (!page) { cc->fast_search_fail++; if (scan_start) { -- cgit v1.2.3 From a8d13355c660255266ece529e81e6cb26754941a Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Thu, 25 May 2023 20:54:01 +0800 Subject: mm: 
compaction: skip fast freepages isolation if enough freepages are isolated I've observed that fast isolation often isolates more pages than cc->migratepages, and the excess freepages will be released back to the buddy system. So skip fast freepages isolation if enough freepages are isolated to save some CPU cycles. Link: https://lkml.kernel.org/r/f39c2c07f2dba2732fd9c0843572e5bef96f7f67.1685018752.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Vlastimil Babka Cc: Mel Gorman Cc: Johannes Weiner Signed-off-by: Andrew Morton --- mm/compaction.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index ce6293bf9c4a..767b0815c874 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1528,6 +1528,10 @@ static void fast_isolate_freepages(struct compact_control *cc) spin_unlock_irqrestore(&cc->zone->lock, flags); + /* Skip fast search if enough freepages isolated */ + if (cc->nr_freepages >= cc->nr_migratepages) + break; + /* * Smaller scan on next order so the total scan is related * to freelist_scan_limit. -- cgit v1.2.3 From 283ebdee2da30f65cba04c8fe690b97acfc7f4c4 Mon Sep 17 00:00:00 2001 From: Tu Jinjiang Date: Thu, 25 May 2023 11:16:40 +0800 Subject: mm: shmem: fix UAF bug in shmem_show_options() shmem_show_options() uses sbinfo->mpol without adding it's refcnt. This may lead to race with replacement of the mpol by remount. The execution sequence is as follows. CPU0 CPU1 shmem_show_options() shmem_reconfigure() shmem_show_mpol(seq, sbinfo->mpol) mpol = sbinfo->mpol mpol_put(mpol) mpol->mode The KASAN report is as follows. BUG: KASAN: slab-use-after-free in shmem_show_options+0x21b/0x340 Read of size 2 at addr ffff888124324004 by task mount/2388 CPU: 2 PID: 2388 Comm: mount Not tainted 6.4.0-rc3-00017-g9d646009f65d-dirty #8 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.13.0-1ubuntu1.1 04/01/2014 Call Trace: dump_stack_lvl+0x37/0x50 print_report+0xd0/0x620 ? shmem_show_options+0x21b/0x340 ? __virt_addr_valid+0xf4/0x180 ? shmem_show_options+0x21b/0x340 kasan_report+0xb8/0xe0 ? shmem_show_options+0x21b/0x340 shmem_show_options+0x21b/0x340 ? __pfx_shmem_show_options+0x10/0x10 ? strchr+0x2c/0x50 ? strlen+0x23/0x40 ? seq_puts+0x7d/0x90 show_vfsmnt+0x1e6/0x260 ? __pfx_show_vfsmnt+0x10/0x10 ? __kasan_kmalloc+0x7f/0x90 seq_read_iter+0x57a/0x740 vfs_read+0x2e2/0x4a0 ? __pfx_vfs_read+0x10/0x10 ? down_write_killable+0xb8/0x140 ? __pfx_down_write_killable+0x10/0x10 ? __fget_light+0xa9/0x1e0 ? up_write+0x3f/0x80 ksys_read+0xb8/0x150 ? __pfx_ksys_read+0x10/0x10 ? fpregs_assert_state_consistent+0x55/0x60 ? 
exit_to_user_mode_prepare+0x2d/0x120 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Allocated by task 2387: kasan_save_stack+0x22/0x50 kasan_set_track+0x25/0x30 __kasan_slab_alloc+0x59/0x70 kmem_cache_alloc+0xdd/0x220 mpol_new+0x83/0x150 mpol_parse_str+0x280/0x4a0 shmem_parse_one+0x364/0x520 vfs_parse_fs_param+0xf8/0x1a0 vfs_parse_fs_string+0xc9/0x130 shmem_parse_options+0xb2/0x110 path_mount+0x597/0xdf0 do_mount+0xcd/0xf0 __x64_sys_mount+0xbd/0x100 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc Freed by task 2389: kasan_save_stack+0x22/0x50 kasan_set_track+0x25/0x30 kasan_save_free_info+0x2e/0x50 __kasan_slab_free+0x10e/0x1a0 kmem_cache_free+0x9c/0x350 shmem_reconfigure+0x278/0x370 reconfigure_super+0x383/0x450 path_mount+0xcc5/0xdf0 do_mount+0xcd/0xf0 __x64_sys_mount+0xbd/0x100 do_syscall_64+0x3c/0x90 entry_SYSCALL_64_after_hwframe+0x72/0xdc The buggy address belongs to the object at ffff888124324000 which belongs to the cache numa_policy of size 32 The buggy address is located 4 bytes inside of freed 32-byte region [ffff888124324000, ffff888124324020) ================================================================== To fix the bug, shmem_get_sbmpol() / mpol_put() needs to be called before / after shmem_show_mpol() call. Link: https://lkml.kernel.org/r/20230525031640.593733-1-tujinjiang@huawei.com Signed-off-by: Tu Jinjiang Reviewed-by: Kefeng Wang Acked-by: Hugh Dickins Cc: Nanyong Sun Signed-off-by: Andrew Morton --- mm/shmem.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/shmem.c b/mm/shmem.c index e40a08c5c6d7..5e54ab5f61f2 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3726,6 +3726,7 @@ out: static int shmem_show_options(struct seq_file *seq, struct dentry *root) { struct shmem_sb_info *sbinfo = SHMEM_SB(root->d_sb); + struct mempolicy *mpol; if (sbinfo->max_blocks != shmem_default_max_blocks()) seq_printf(seq, ",size=%luk", @@ -3768,7 +3769,9 @@ static int shmem_show_options(struct seq_file *seq, struct dentry *root) if (sbinfo->huge) seq_printf(seq, ",huge=%s", shmem_format_huge(sbinfo->huge)); #endif - shmem_show_mpol(seq, sbinfo->mpol); + mpol = shmem_get_sbmpol(sbinfo); + shmem_show_mpol(seq, mpol); + mpol_put(mpol); if (sbinfo->noswap) seq_printf(seq, ",noswap"); return 0; -- cgit v1.2.3 From e0e0b4126c1f1effd480777507a61bd09360dc8f Mon Sep 17 00:00:00 2001 From: "Lars R. Damerow" Date: Wed, 24 May 2023 11:17:33 -0700 Subject: mm/memcontrol: export memcg.swap watermark via sysfs for v2 memcg This patch is similar to commit 8e20d4b33266 ("mm/memcontrol: export memcg->watermark via sysfs for v2 memcg"), but exports the swap counter's watermark. We allocate jobs to our compute farm using heuristics determined by memory and swap usage from previous jobs. Tracking the peak swap usage for new jobs is important for determining when jobs are exceeding their expected bounds, or when our baseline metrics are getting outdated. Our toolset was written to use the "memory.memsw.max_usage_in_bytes" file in cgroups v1, and altering it to poll cgroups v2's "memory.swap.current" would give less accurate results as well as add complication to the code. Having this watermark exposed in sysfs is much preferred. Link: https://lkml.kernel.org/r/20230524181734.125696-1-lars@pixar.com Signed-off-by: Lars R. 
Damerow Acked-by: Johannes Weiner Cc: Jonathan Corbet Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Cc: Tejun Heo Cc: Zefan Li Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v2.rst | 7 +++++++ mm/memcontrol.c | 13 +++++++++++++ 2 files changed, 20 insertions(+) (limited to 'mm') diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index f67c0829350b..1ffe019483ac 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1582,6 +1582,13 @@ PAGE_SIZE multiple when read back. Healthy workloads are not expected to reach this limit. + memory.swap.peak + A read-only single value file which exists on non-root + cgroups. + + The max swap usage recorded for the cgroup and its + descendants since the creation of the cgroup. + memory.swap.max A read-write single value file which exists on non-root cgroups. The default is "max". diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6a3d4ce87b8a..6ee433be4c3b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7622,6 +7622,14 @@ static u64 swap_current_read(struct cgroup_subsys_state *css, return (u64)page_counter_read(&memcg->swap) * PAGE_SIZE; } +static u64 swap_peak_read(struct cgroup_subsys_state *css, + struct cftype *cft) +{ + struct mem_cgroup *memcg = mem_cgroup_from_css(css); + + return (u64)memcg->swap.watermark * PAGE_SIZE; +} + static int swap_high_show(struct seq_file *m, void *v) { return seq_puts_memcg_tunable(m, @@ -7700,6 +7708,11 @@ static struct cftype swap_files[] = { .seq_show = swap_max_show, .write = swap_max_write, }, + { + .name = "swap.peak", + .flags = CFTYPE_NOT_ON_ROOT, + .read_u64 = swap_peak_read, + }, { .name = "swap.events", .flags = CFTYPE_NOT_ON_ROOT, -- cgit v1.2.3 From 3af0191a594d5ca5d6d2e3602b5d4284c6835e77 Mon Sep 17 00:00:00 2001 From: Kalesh Singh Date: Tue, 23 May 2023 13:59:21 -0700 Subject: Multi-gen LRU: fix workingset accounting On Android app cycle workloads, MGLRU showed a significant reduction in workingset refaults although pgpgin/pswpin remained relatively unchanged. This indicated MGLRU may be undercounting workingset refaults. This has impact on userspace programs, like Android's LMKD, that monitor workingset refault statistics to detect thrashing. It was found that refaults were only accounted if the MGLRU shadow entry was for a recently evicted folio. However, recently evicted folios should be accounted as workingset activation, and refaults should be accounted regardless of recency. Fix MGLRU's workingset refault and activation accounting to more closely match that of the conventional active/inactive LRU. 
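
Put differently, a refault is now counted whenever the shadow entry still resolves to the folio's lruvec, and only the additional activation is gated on the entry being recent. A stand-alone sketch of that accounting split (all names invented for the example):

#include <stdbool.h>
#include <stdio.h>

struct toy_stats {
	long refaults;      /* analogue of WORKINGSET_REFAULT_* */
	long activations;   /* analogue of WORKINGSET_ACTIVATE_* */
};

/* Account one refaulting folio of 'delta' pages. */
static void toy_account_refault(struct toy_stats *s, bool same_lruvec,
				bool recent, long delta)
{
	if (!same_lruvec)
		return;                  /* shadow no longer matches, ignore */

	s->refaults += delta;            /* counted regardless of recency */

	if (recent)
		s->activations += delta; /* only recent evictions activate */
}

int main(void)
{
	struct toy_stats s = { 0, 0 };

	toy_account_refault(&s, true, true, 1);   /* recently evicted folio */
	toy_account_refault(&s, true, false, 1);  /* older eviction */
	printf("refaults=%ld activations=%ld\n", s.refaults, s.activations);
	return 0;
}

Before the fix the second case would not have been counted at all, which is the undercounting that threw off refault-based thrashing detection.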
Link: https://lkml.kernel.org/r/20230523205922.3852731-1-kaleshsingh@google.com Fixes: ac35a4902374 ("mm: multi-gen LRU: minimal implementation") Signed-off-by: Kalesh Singh Reported-by: Charan Teja Kalla Acked-by: Yu Zhao Cc: Brian Geffon Cc: Jan Alexander Steffens (heftig) Cc: Oleksandr Natalenko Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/vmscan.c | 1 - mm/workingset.c | 10 +++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index dbbfcc631f5c..3024b4619b65 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4925,7 +4925,6 @@ static bool sort_folio(struct lruvec *lruvec, struct folio *folio, int tier_idx) WRITE_ONCE(lrugen->protected[hist][type][tier - 1], lrugen->protected[hist][type][tier - 1] + delta); - __mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); return true; } diff --git a/mm/workingset.c b/mm/workingset.c index 5796e927e6d7..4686ae363000 100644 --- a/mm/workingset.c +++ b/mm/workingset.c @@ -278,6 +278,7 @@ static bool lru_gen_test_recent(void *shadow, bool file, struct lruvec **lruvec, static void lru_gen_refault(struct folio *folio, void *shadow) { + bool recent; int hist, tier, refs; bool workingset; unsigned long token; @@ -288,10 +289,13 @@ static void lru_gen_refault(struct folio *folio, void *shadow) rcu_read_lock(); - if (!lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset)) + recent = lru_gen_test_recent(shadow, type, &lruvec, &token, &workingset); + if (lruvec != folio_lruvec(folio)) goto unlock; - if (lruvec != folio_lruvec(folio)) + mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); + + if (!recent) goto unlock; lrugen = &lruvec->lrugen; @@ -302,7 +306,7 @@ static void lru_gen_refault(struct folio *folio, void *shadow) tier = lru_tier_from_refs(refs); atomic_long_add(delta, &lrugen->refaulted[hist][type][tier]); - mod_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + type, delta); + mod_lruvec_state(lruvec, WORKINGSET_ACTIVATE_BASE + type, delta); /* * Count the following two cases as stalls: -- cgit v1.2.3 From ba1b67c79cb3c5f5d11cb475bb7045929b235538 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Fri, 26 May 2023 08:52:50 +0000 Subject: mm/mm_init.c: introduce reset_memoryless_node_totalpages() Currently, no matter whether a node actually has memory or not, calculate_node_totalpages() is used to account number of pages in zone/node. However, for node without memory, these unnecessary calculations can be skipped. All the zone/node page counts can be set to 0 directly. So introduce reset_memoryless_node_totalpages() to perform this action. Furthermore, calculate_node_totalpages() only gets called for the node with memory. 
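The resulting call-site split in free_area_init_node() then reads roughly as follows (a sketch; the condition stands in for the existing pfn-range check, and the hunk at the end of the diff below is authoritative):

	if (start_pfn != end_pfn) {	/* assumed shape of the existing has-memory check */
		pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
		calculate_node_totalpages(pgdat, start_pfn, end_pfn);
	} else {
		pr_info("Initmem setup node %d as memoryless\n", nid);
		reset_memoryless_node_totalpages(pgdat);
	}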
Link: https://lkml.kernel.org/r/20230526085251.1977-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Mike Rapoport Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 10bf560302c4..6f7da396b67b 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1176,10 +1176,6 @@ static unsigned long __init zone_absent_pages_in_node(int nid, unsigned long zone_start_pfn, zone_end_pfn; unsigned long nr_absent; - /* When hotadd a new node from cpu_up(), the node should be empty */ - if (!node_start_pfn && !node_end_pfn) - return 0; - zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); @@ -1229,9 +1225,6 @@ static unsigned long __init zone_spanned_pages_in_node(int nid, { unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; - /* When hotadd a new node from cpu_up(), the node should be empty */ - if (!node_start_pfn && !node_end_pfn) - return 0; /* Get the start and end of the zone */ *zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); @@ -1252,6 +1245,24 @@ static unsigned long __init zone_spanned_pages_in_node(int nid, return *zone_end_pfn - *zone_start_pfn; } +static void __init reset_memoryless_node_totalpages(struct pglist_data *pgdat) +{ + struct zone *z; + + for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) { + z->zone_start_pfn = 0; + z->spanned_pages = 0; + z->present_pages = 0; +#if defined(CONFIG_MEMORY_HOTPLUG) + z->present_early_pages = 0; +#endif + } + + pgdat->node_spanned_pages = 0; + pgdat->node_present_pages = 0; + pr_debug("On node %d totalpages: 0\n", pgdat->node_id); +} + static void __init calculate_node_totalpages(struct pglist_data *pgdat, unsigned long node_start_pfn, unsigned long node_end_pfn) @@ -1704,11 +1715,13 @@ static void __init free_area_init_node(int nid) pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid, (u64)start_pfn << PAGE_SHIFT, end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0); + + calculate_node_totalpages(pgdat, start_pfn, end_pfn); } else { pr_info("Initmem setup node %d as memoryless\n", nid); - } - calculate_node_totalpages(pgdat, start_pfn, end_pfn); + reset_memoryless_node_totalpages(pgdat); + } alloc_node_mem_map(pgdat); pgdat_set_deferred_range(pgdat); -- cgit v1.2.3 From 1c2d252f5b4289e1c6840bcf394157b70c639d6e Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Fri, 26 May 2023 08:52:51 +0000 Subject: mm/mm_init.c: do not calculate zone_start_pfn/zone_end_pfn in zone_absent_pages_in_node() In calculate_node_totalpages(), zone_start_pfn/zone_end_pfn are already calculated in zone_spanned_pages_in_node(), so use them as parameters instead of node_start_pfn/node_end_pfn and the duplicated calculation process can de dropped. 
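With both helpers taking the zone range, the loop body in calculate_node_totalpages() chains them directly, which is where the duplicated clamping disappears (sketch of the result; the hunks below are authoritative):

	spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
					     node_start_pfn, node_end_pfn,
					     &zone_start_pfn, &zone_end_pfn);
	absent = zone_absent_pages_in_node(pgdat->node_id, i,
					   zone_start_pfn, zone_end_pfn);
	real_size = spanned - absent;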
Link: https://lkml.kernel.org/r/20230526085251.1977-2-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: Mike Rapoport Reviewed-by: Mike Rapoport (IBM) Cc: David Hildenbrand Cc: Haifeng Xu Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 6f7da396b67b..19652c1b3426 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1168,20 +1168,15 @@ unsigned long __init absent_pages_in_range(unsigned long start_pfn, /* Return the number of page frames in holes in a zone on a node */ static unsigned long __init zone_absent_pages_in_node(int nid, unsigned long zone_type, - unsigned long node_start_pfn, - unsigned long node_end_pfn) + unsigned long zone_start_pfn, + unsigned long zone_end_pfn) { - unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type]; - unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; - unsigned long zone_start_pfn, zone_end_pfn; unsigned long nr_absent; - zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); - zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); + /* zone is empty, we don't have any absent pages */ + if (zone_start_pfn == zone_end_pfn) + return 0; - adjust_zone_range_for_zone_movable(nid, zone_type, - node_start_pfn, node_end_pfn, - &zone_start_pfn, &zone_end_pfn); nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn); /* @@ -1274,7 +1269,7 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat, struct zone *zone = pgdat->node_zones + i; unsigned long zone_start_pfn, zone_end_pfn; unsigned long spanned, absent; - unsigned long size, real_size; + unsigned long real_size; spanned = zone_spanned_pages_in_node(pgdat->node_id, i, node_start_pfn, @@ -1282,23 +1277,22 @@ static void __init calculate_node_totalpages(struct pglist_data *pgdat, &zone_start_pfn, &zone_end_pfn); absent = zone_absent_pages_in_node(pgdat->node_id, i, - node_start_pfn, - node_end_pfn); + zone_start_pfn, + zone_end_pfn); - size = spanned; - real_size = size - absent; + real_size = spanned - absent; - if (size) + if (spanned) zone->zone_start_pfn = zone_start_pfn; else zone->zone_start_pfn = 0; - zone->spanned_pages = size; + zone->spanned_pages = spanned; zone->present_pages = real_size; #if defined(CONFIG_MEMORY_HOTPLUG) zone->present_early_pages = real_size; #endif - totalpages += size; + totalpages += spanned; realtotalpages += real_size; } -- cgit v1.2.3 From f9f956b550b8ce6fb902af9f29ba31b3e0fd052d Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:51 +0800 Subject: swap: remove get/put_swap_device() in __swap_count() Patch series "swap: cleanup get/put_swap_device() usage", v3. The general rule to use a swap entry is as follows. When we get a swap entry, if there aren't some other ways to prevent swapoff, such as the folio in swap cache is locked, page table lock is held, etc., the swap entry may become invalid because of swapoff. Then, we need to enclose all swap related functions with get_swap_device() and put_swap_device(), unless the swap functions call get/put_swap_device() by themselves. Based on the above rule, all get/put_swap_device() usage are checked and cleaned up if necessary. This patch (of 5): get/put_swap_device() are added to __swap_count() in commit eb085574a752 ("mm, swap: fix race between swapoff and some swap operations"). 
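Spelled out, the rule above amounts to the following shape for any swap-entry user that has no other guarantee against swapoff (a minimal sketch; example_swap_entry_user() is a made-up name, the calls are the existing mm/swapfile.c API):

	static int example_swap_entry_user(swp_entry_t entry)
	{
		struct swap_info_struct *si;

		si = get_swap_device(entry);
		if (!si)
			return -ENOENT;	/* raced with swapoff; entry is stale */

		/* the entry and its swap_map slot stay valid until the put */

		put_swap_device(si);
		return 0;
	}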
Later, in commit 2799e77529c2 ("swap: fix do_swap_page() race with swapoff"), get/put_swap_device() are added to do_swap_page(). And they enclose the only call site of __swap_count(). So, it's safe to remove get/put_swap_device() in __swap_count() now. Link: https://lkml.kernel.org/r/20230529061355.125791-1-ying.huang@intel.com Link: https://lkml.kernel.org/r/20230529061355.125791-2-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Yosry Ahmed Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index c74259001d5e..cf8b16b6a98e 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1433,16 +1433,10 @@ void swapcache_free_entries(swp_entry_t *entries, int n) int __swap_count(swp_entry_t entry) { - struct swap_info_struct *si; + struct swap_info_struct *si = swp_swap_info(entry); pgoff_t offset = swp_offset(entry); - int count = 0; - si = get_swap_device(entry); - if (si) { - count = swap_count(si->swap_map[offset]); - put_swap_device(si); - } - return count; + return swap_count(si->swap_map[offset]); } /* -- cgit v1.2.3 From 46a774d3eae523fd9a1907ba90e9a116cd1c5ddc Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:52 +0800 Subject: swap, __read_swap_cache_async(): enlarge get/put_swap_device protection range This makes the function a little easier to be understood because we don't need to consider swapoff. And this makes it possible to remove get/put_swap_device() calling in some functions called by __read_swap_cache_async(). Link: https://lkml.kernel.org/r/20230529061355.125791-3-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/swap_state.c | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/swap_state.c b/mm/swap_state.c index b76a65ac28b3..a8450b4a110c 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -417,9 +417,13 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, { struct swap_info_struct *si; struct folio *folio; + struct page *page; void *shadow = NULL; *new_page_allocated = false; + si = get_swap_device(entry); + if (!si) + return NULL; for (;;) { int err; @@ -428,14 +432,12 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * called after swap_cache_get_folio() failed, re-calling * that would confuse statistics. */ - si = get_swap_device(entry); - if (!si) - return NULL; folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry)); - put_swap_device(si); - if (!IS_ERR(folio)) - return folio_file_page(folio, swp_offset(entry)); + if (!IS_ERR(folio)) { + page = folio_file_page(folio, swp_offset(entry)); + goto got_page; + } /* * Just skip read ahead for unused swap slot. @@ -446,7 +448,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * else swap_off will be aborted if we return NULL. */ if (!__swp_swapcount(entry) && swap_slot_cache_enabled) - return NULL; + goto fail_put_swap; /* * Get a new page to read into from swap. 
Allocate it now, @@ -455,7 +457,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, */ folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false); if (!folio) - return NULL; + goto fail_put_swap; /* * Swap entry may have been freed since our caller observed it. @@ -466,7 +468,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, folio_put(folio); if (err != -EEXIST) - return NULL; + goto fail_put_swap; /* * We might race against __delete_from_swap_cache(), and @@ -500,12 +502,17 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, /* Caller will initiate read into locked folio */ folio_add_lru(folio); *new_page_allocated = true; - return &folio->page; + page = &folio->page; +got_page: + put_swap_device(si); + return page; fail_unlock: put_swap_folio(folio, entry); folio_unlock(folio); folio_put(folio); +fail_put_swap: + put_swap_device(si); return NULL; } @@ -514,6 +521,10 @@ fail_unlock: * and reading the disk if it is not already cached. * A failure return means that either the page allocation failed or that * the swap entry is no longer in use. + * + * get/put_swap_device() aren't needed to call this function, because + * __read_swap_cache_async() call them and swap_readpage() holds the + * swap cache folio lock. */ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, struct vm_area_struct *vma, -- cgit v1.2.3 From 3ecdeb0f876e91c4a7129ba2ba5baa530aa6c4f9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:53 +0800 Subject: swap: remove __swp_swapcount() __swp_swapcount() just encloses the calling to swap_swapcount() with get/put_swap_device(). It is called in __read_swap_cache_async() only, which encloses the calling with get/put_swap_device() already. So, __read_swap_cache_async() can call swap_swapcount() directly. Link: https://lkml.kernel.org/r/20230529061355.125791-4-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- include/linux/swap.h | 4 ++-- mm/swap_state.c | 2 +- mm/swapfile.c | 20 +------------------- 3 files changed, 4 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index b2128df5edea..2ddbfd85f6c7 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -513,7 +513,7 @@ int find_first_swap(dev_t *device); extern unsigned int count_swap_pages(int, int); extern sector_t swapdev_block(int, pgoff_t); extern int __swap_count(swp_entry_t entry); -extern int __swp_swapcount(swp_entry_t entry); +extern int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry); extern int swp_swapcount(swp_entry_t entry); extern struct swap_info_struct *page_swap_info(struct page *); extern struct swap_info_struct *swp_swap_info(swp_entry_t entry); @@ -591,7 +591,7 @@ static inline int __swap_count(swp_entry_t entry) return 0; } -static inline int __swp_swapcount(swp_entry_t entry) +static inline int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { return 0; } diff --git a/mm/swap_state.c b/mm/swap_state.c index a8450b4a110c..ef32353c18a6 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -447,7 +447,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * as SWAP_HAS_CACHE. 
That's done in later part of code or * else swap_off will be aborted if we return NULL. */ - if (!__swp_swapcount(entry) && swap_slot_cache_enabled) + if (!swap_swapcount(si, entry) && swap_slot_cache_enabled) goto fail_put_swap; /* diff --git a/mm/swapfile.c b/mm/swapfile.c index cf8b16b6a98e..2d264efe90d2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1444,7 +1444,7 @@ int __swap_count(swp_entry_t entry) * This does not give an exact answer when swap count is continued, * but does include the high COUNT_CONTINUED flag to allow for that. */ -static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) +int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) { pgoff_t offset = swp_offset(entry); struct swap_cluster_info *ci; @@ -1456,24 +1456,6 @@ static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry) return count; } -/* - * How many references to @entry are currently swapped out? - * This does not give an exact answer when swap count is continued, - * but does include the high COUNT_CONTINUED flag to allow for that. - */ -int __swp_swapcount(swp_entry_t entry) -{ - int count = 0; - struct swap_info_struct *si; - - si = get_swap_device(entry); - if (si) { - count = swap_swapcount(si, entry); - put_swap_device(si); - } - return count; -} - /* * How many references to @entry are currently swapped out? * This considers COUNT_CONTINUED so it returns exact answer. -- cgit v1.2.3 From c07aee4f82af3c466509782b15658837fe53babc Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:54 +0800 Subject: swap: remove get/put_swap_device() in __swap_duplicate() __swap_duplicate() is called by - swap_shmem_alloc(): the folio in swap cache is locked. - copy_nonpresent_pte() -> swap_duplicate() and try_to_unmap_one() -> swap_duplicate(): the page table lock is held. - __read_swap_cache_async() -> swapcache_prepare(): enclosed with get/put_swap_device() in __read_swap_cache_async() already. So, it's safe to remove get/put_swap_device() in __swap_duplicate(). Link: https://lkml.kernel.org/r/20230529061355.125791-5-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: Yosry Ahmed Reviewed-by: David Hildenbrand Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index 2d264efe90d2..bd035677f196 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3265,9 +3265,7 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) unsigned char has_cache; int err; - p = get_swap_device(entry); - if (!p) - return -EINVAL; + p = swp_swap_info(entry); offset = swp_offset(entry); ci = lock_cluster_or_swap_info(p, offset); @@ -3314,7 +3312,6 @@ static int __swap_duplicate(swp_entry_t entry, unsigned char usage) unlock_out: unlock_cluster_or_swap_info(p, ci); - put_swap_device(p); return err; } -- cgit v1.2.3 From a95722a047724ef75567381976a36f0e44230bd9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Mon, 29 May 2023 14:13:55 +0800 Subject: swap: comments get_swap_device() with usage rule The general rule to use a swap entry is as follows. When we get a swap entry, if there aren't some other ways to prevent swapoff, such as the folio in swap cache is locked, page table lock is held, etc., the swap entry may become invalid because of swapoff. 
Then, we need to enclose all swap related functions with get_swap_device() and put_swap_device(), unless the swap functions call get/put_swap_device() by themselves. Add the rule as comments of get_swap_device(). Link: https://lkml.kernel.org/r/20230529061355.125791-6-ying.huang@intel.com Signed-off-by: "Huang, Ying" Reviewed-by: David Hildenbrand Reviewed-by: Yosry Ahmed Reviewed-by: Chris Li (Google) Cc: Hugh Dickins Cc: Johannes Weiner Cc: Matthew Wilcox Cc: Michal Hocko Cc: Minchan Kim Cc: Tim Chen Cc: Yang Shi Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/swapfile.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index bd035677f196..df312534e239 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1220,6 +1220,13 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, } /* + * When we get a swap entry, if there aren't some other ways to + * prevent swapoff, such as the folio in swap cache is locked, page + * table lock is held, etc., the swap entry may become invalid because + * of swapoff. Then, we need to enclose all swap related functions + * with get_swap_device() and put_swap_device(), unless the swap + * functions call get/put_swap_device() by themselves. + * * Check whether swap entry is valid in the swap device. If so, * return pointer to swap_info_struct, and keep the swap entry valid * via preventing the swap device from being swapoff, until @@ -1228,9 +1235,8 @@ static unsigned char __swap_entry_free_locked(struct swap_info_struct *p, * Notice that swapoff or swapoff+swapon can still happen before the * percpu_ref_tryget_live() in get_swap_device() or after the * percpu_ref_put() in put_swap_device() if there isn't any other way - * to prevent swapoff, such as page lock, page table lock, etc. The - * caller must be prepared for that. For example, the following - * situation is possible. + * to prevent swapoff. The caller must be prepared for that. For + * example, the following situation is possible. * * CPU1 CPU2 * do_swap_page() -- cgit v1.2.3 From deedad80f660af8199ea3b3f70939f2d226b9154 Mon Sep 17 00:00:00 2001 From: Yin Fengwei Date: Sat, 29 Apr 2023 16:27:58 +0800 Subject: THP: avoid lock when check whether THP is in deferred list free_transhuge_page() acquires split queue lock then check whether the THP was added to deferred list or not. It brings high deferred queue lock contention. It's safe to check whether the THP is in deferred list or not without holding the deferred queue lock in free_transhuge_page() because when code hit free_transhuge_page(), there is no one tries to add the folio to _deferred_list. Running page_fault1 of will-it-scale + order 2 folio for anonymous mapping with 96 processes on an Ice Lake 48C/96T test box, we could see the 61% split_queue_lock contention: - 63.02% 0.01% page_fault1_pro [kernel.kallsyms] [k] free_transhuge_page - 63.01% free_transhuge_page + 62.91% _raw_spin_lock_irqsave With this patch applied, the split_queue_lock contention is less than 1%. Link: https://lkml.kernel.org/r/20230429082759.1600796-2-fengwei.yin@intel.com Signed-off-by: Yin Fengwei Acked-by: Kirill A. 
Shutemov Reviewed-by: "Huang, Ying" Cc: Matthew Wilcox Cc: Ryan Roberts Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/huge_memory.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 624671aaa60d..9ccdb3fe3244 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2792,12 +2792,19 @@ void free_transhuge_page(struct page *page) struct deferred_split *ds_queue = get_deferred_split_queue(folio); unsigned long flags; - spin_lock_irqsave(&ds_queue->split_queue_lock, flags); - if (!list_empty(&folio->_deferred_list)) { - ds_queue->split_queue_len--; - list_del(&folio->_deferred_list); + /* + * At this point, there is no one trying to add the folio to + * deferred_list. If folio is not in deferred_list, it's safe + * to check without acquiring the split_queue_lock. + */ + if (data_race(!list_empty(&folio->_deferred_list))) { + spin_lock_irqsave(&ds_queue->split_queue_lock, flags); + if (!list_empty(&folio->_deferred_list)) { + ds_queue->split_queue_len--; + list_del(&folio->_deferred_list); + } + spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); } - spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags); free_compound_page(page); } -- cgit v1.2.3 From 837c2ba56d6fd1ecf7a1c5aa0cdc872f3b74185b Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Sun, 28 May 2023 04:57:20 +0000 Subject: mm/mm_init.c: remove free_area_init_memoryless_node() free_area_init_memoryless_node() is just a wrapper of free_area_init_node(), remove it to clean up. Link: https://lkml.kernel.org/r/20230528045720.4835-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Mike Rapoport Signed-off-by: Andrew Morton --- mm/mm_init.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 19652c1b3426..3ddd18a89b66 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1754,11 +1754,6 @@ void __init setup_nr_node_ids(void) } #endif -static void __init free_area_init_memoryless_node(int nid) -{ - free_area_init_node(nid); -} - /* * Some architectures, e.g. ARC may have ZONE_HIGHMEM below ZONE_NORMAL. For * such cases we allow max_zone_pfn sorted in the descending order @@ -1869,7 +1864,7 @@ void __init free_area_init(unsigned long *max_zone_pfn) panic("Cannot allocate %zuB for node %d.\n", sizeof(*pgdat), nid); arch_refresh_nodedata(nid, pgdat); - free_area_init_memoryless_node(nid); + free_area_init_node(nid); /* * We do not want to confuse userspace by sysfs -- cgit v1.2.3 From 3b11edf1f2398cac206a224308de6628ebeea924 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 14 May 2023 09:28:56 +0900 Subject: mm/page_alloc: don't wake kswapd from rmqueue() unless __GFP_KSWAPD_RECLAIM is specified Commit 73444bc4d8f9 ("mm, page_alloc: do not wake kswapd with zone lock held") moved wakeup_kswapd() from steal_suitable_fallback() to rmqueue() using ZONE_BOOSTED_WATERMARK flag. Only allocation contexts that include ALLOC_KSWAPD (which corresponds to __GFP_KSWAPD_RECLAIM) should wake kswapd, for callers are supposed to remove __GFP_KSWAPD_RECLAIM if trying to hold pgdat->kswapd_wait has a risk of deadlock. But since zone->flags is a shared variable, a thread doing !__GFP_KSWAPD_RECLAIM allocation request might observe this flag being set immediately after another thread doing __GFP_KSWAPD_RECLAIM allocation request set this flag, causing possibility of deadlock. 
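The interleaving being closed off, in the CPU1/CPU2 timeline style used elsewhere in mm (illustrative only; the exact call paths may differ):

	/*
	 * CPU1 (__GFP_KSWAPD_RECLAIM set)	CPU2 (caller cleared __GFP_KSWAPD_RECLAIM
	 *					      because blocking on kswapd_wait
	 *					      could deadlock)
	 * steal_suitable_fallback()
	 *   set_bit(ZONE_BOOSTED_WATERMARK)
	 *					rmqueue()
	 *					  test_bit(ZONE_BOOSTED_WATERMARK) -> true
	 *					  wakeup_kswapd()
	 *
	 * With the fix, CPU2 skips the wakeup because ALLOC_KSWAPD is not set
	 * in its alloc_flags.
	 */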
Link: https://lkml.kernel.org/r/c3c3dacf-dd3b-77c9-f96a-d0982b4b2a4f@I-love.SAKURA.ne.jp Fixes: 73444bc4d8f9 ("mm, page_alloc: do not wake kswapd with zone lock held") Signed-off-by: Tetsuo Handa Acked-by: Mel Gorman Cc: "Huang, Ying" Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/page_alloc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7e8673504a3d..091a6cb44265 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2827,7 +2827,8 @@ struct page *rmqueue(struct zone *preferred_zone, out: /* Separate test+clear to avoid unnecessary atomics */ - if (unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { + if ((alloc_flags & ALLOC_KSWAPD) && + unlikely(test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags))) { clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags); wakeup_kswapd(zone, 0, 0, zone_idx(zone)); } -- cgit v1.2.3 From 16618670276a77480e274117992cec5e42ce66a9 Mon Sep 17 00:00:00 2001 From: Xin Hao Date: Wed, 31 May 2023 17:58:17 +0800 Subject: mm: khugepaged: avoid pointless allocation for "struct mm_slot" In __khugepaged_enter(), if "mm->flags" with MMF_VM_HUGEPAGE bit is set, the "mm_slot" will be released and return, so we can call mm_slot_alloc() after test_and_set_bit(). Link: https://lkml.kernel.org/r/20230531095817.11012-1-xhao@linux.alibaba.com Signed-off-by: Xin Hao Reviewed-by: Andrew Morton Signed-off-by: Andrew Morton --- mm/khugepaged.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 6b9d39d65b73..3649ba12a235 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -422,19 +422,17 @@ void __khugepaged_enter(struct mm_struct *mm) struct mm_slot *slot; int wakeup; + /* __khugepaged_exit() must not run from under us */ + VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); + if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) + return; + mm_slot = mm_slot_alloc(mm_slot_cache); if (!mm_slot) return; slot = &mm_slot->slot; - /* __khugepaged_exit() must not run from under us */ - VM_BUG_ON_MM(hpage_collapse_test_exit(mm), mm); - if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) { - mm_slot_free(mm_slot_cache, mm_slot); - return; - } - spin_lock(&khugepaged_mm_lock); mm_slot_insert(mm_slots_hash, mm, slot); /* -- cgit v1.2.3 From e3d9b45fb17cfddb1c414b5981743d4245fcf486 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Thu, 1 Jun 2023 06:35:35 +0000 Subject: mm/mm_init.c: move set_pageblock_order() to free_area_init() pageblock_order only needs to be set once, there is no need to initialize it in every zone/node. 
Link: https://lkml.kernel.org/r/20230601063536.26882-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Reviewed-by: Mike Rapoport (IBM) Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 3ddd18a89b66..015355dfdc0b 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1585,7 +1585,6 @@ static void __init free_area_init_core(struct pglist_data *pgdat) if (!size) continue; - set_pageblock_order(); setup_usemap(zone); init_currently_empty_zone(zone, zone->zone_start_pfn, size); } @@ -1852,6 +1851,8 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* Initialise every node */ mminit_verify_pageflags_layout(); setup_nr_node_ids(); + set_pageblock_order(); + for_each_node(nid) { pg_data_t *pgdat; -- cgit v1.2.3 From e0228d590beb0d0af345c58a282f01afac5c57f3 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Fri, 26 May 2023 20:32:27 +0200 Subject: mm: zswap: shrink until can accept This update addresses an issue with the zswap reclaim mechanism, which hinders the efficient offloading of cold pages to disk, thereby compromising the preservation of the LRU order and consequently diminishing, if not inverting, its performance benefits. The functioning of the zswap shrink worker was found to be inadequate, as shown by basic benchmark test. For the test, a kernel build was utilized as a reference, with its memory confined to 1G via a cgroup and a 5G swap file provided. The results are presented below, these are averages of three runs without the use of zswap: real 46m26s user 35m4s sys 7m37s With zswap (zbud) enabled and max_pool_percent set to 1 (in a 32G system), the results changed to: real 56m4s user 35m13s sys 8m43s written_back_pages: 18 reject_reclaim_fail: 0 pool_limit_hit:1478 Besides the evident regression, one thing to notice from this data is the extremely low number of written_back_pages and pool_limit_hit. The pool_limit_hit counter, which is increased in zswap_frontswap_store when zswap is completely full, doesn't account for a particular scenario: once zswap hits his limit, zswap_pool_reached_full is set to true; with this flag on, zswap_frontswap_store rejects pages if zswap is still above the acceptance threshold. Once we include the rejections due to zswap_pool_reached_full && !zswap_can_accept(), the number goes from 1478 to a significant 21578266. Zswap is stuck in an undesirable state where it rejects pages because it's above the acceptance threshold, yet fails to attempt memory reclaimation. This happens because the shrink work is only queued when zswap_frontswap_store detects that it's full and the work itself only reclaims one page per run. This state results in hot pages getting written directly to disk, while cold ones remain memory, waiting only to be invalidated. The LRU order is completely broken and zswap ends up being just an overhead without providing any benefits. This commit applies 2 changes: a) the shrink worker is set to reclaim pages until the acceptance threshold is met and b) the task is also enqueued when zswap is not full but still above the threshold. 
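For reference, this is how shrink_worker() reads once the first hunk below is applied, with the retry/stop conditions annotated (MAX_RECLAIM_RETRIES comes from mm/internal.h, hence the new include):

	static void shrink_worker(struct work_struct *w)
	{
		struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work);
		int ret, failures = 0;

		do {
			ret = zpool_shrink(pool->zpool, 1, NULL);
			if (ret) {
				zswap_reject_reclaim_fail++;
				if (ret != -EAGAIN)
					break;		/* hard failure: stop */
				if (++failures == MAX_RECLAIM_RETRIES)
					break;		/* too many transient failures */
			}
			cond_resched();
		} while (!zswap_can_accept());	/* keep going until below the threshold */

		zswap_pool_put(pool);
	}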
Testing this suggested update showed much better numbers: real 36m37s user 35m8s sys 9m32s written_back_pages: 10459423 reject_reclaim_fail: 12896 pool_limit_hit: 75653 Link: https://lkml.kernel.org/r/20230526183227.793977-1-cerasuolodomenico@gmail.com Fixes: 45190f01dd40 ("mm/zswap.c: add allocation hysteresis if pool limit is hit") Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Reviewed-by: Yosry Ahmed Reviewed-by: Vitaly Wool Cc: Dan Streetman Cc: Seth Jennings Signed-off-by: Andrew Morton --- mm/zswap.c | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 59da2a415fbb..bcb82e09eb64 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -37,6 +37,7 @@ #include #include "swap.h" +#include "internal.h" /********************************* * statistics @@ -587,9 +588,19 @@ static void shrink_worker(struct work_struct *w) { struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work); + int ret, failures = 0; - if (zpool_shrink(pool->zpool, 1, NULL)) - zswap_reject_reclaim_fail++; + do { + ret = zpool_shrink(pool->zpool, 1, NULL); + if (ret) { + zswap_reject_reclaim_fail++; + if (ret != -EAGAIN) + break; + if (++failures == MAX_RECLAIM_RETRIES) + break; + } + cond_resched(); + } while (!zswap_can_accept()); zswap_pool_put(pool); } @@ -1188,7 +1199,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, if (zswap_pool_reached_full) { if (!zswap_can_accept()) { ret = -ENOMEM; - goto reject; + goto shrink; } else zswap_pool_reached_full = false; } -- cgit v1.2.3 From 0d625446d0a451a683a357799912b9e688629707 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:53 +0200 Subject: backing_dev: remove current->backing_dev_info Patch series "cleanup the filemap / direct I/O interaction", v4. This series cleans up some of the generic write helper calling conventions and the page cache writeback / invalidation for direct I/O. This is a spinoff from the no-bufferhead kernel project, for which we'll want to an use iomap based buffered write path in the block layer. This patch (of 12): The last user of current->backing_dev_info disappeared in commit b9b1335e6403 ("remove bdi_congested() and wb_congested() and related functions"). Remove the field and all assignments to it. Link: https://lkml.kernel.org/r/20230601145904.1385409-1-hch@lst.de Link: https://lkml.kernel.org/r/20230601145904.1385409-2-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Christian Brauner Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Reviewed-by: Johannes Thumshirn Reviewed-by: Darrick J. 
Wong Acked-by: Theodore Ts'o Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/btrfs/file.c | 6 +----- fs/ceph/file.c | 4 ---- fs/ext4/file.c | 2 -- fs/f2fs/file.c | 2 -- fs/fuse/file.c | 4 ---- fs/gfs2/file.c | 2 -- fs/nfs/file.c | 5 +---- fs/ntfs/file.c | 2 -- fs/ntfs3/file.c | 3 --- fs/xfs/xfs_file.c | 4 ---- include/linux/sched.h | 3 --- mm/filemap.c | 3 --- 12 files changed, 2 insertions(+), 38 deletions(-) (limited to 'mm') diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index f649647392e0..ecd43ab66fa6 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1145,7 +1145,6 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, !(BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW | BTRFS_INODE_PREALLOC))) return -EAGAIN; - current->backing_dev_info = inode_to_bdi(inode); ret = file_remove_privs(file); if (ret) return ret; @@ -1165,10 +1164,8 @@ static int btrfs_write_check(struct kiocb *iocb, struct iov_iter *from, loff_t end_pos = round_up(pos + count, fs_info->sectorsize); ret = btrfs_cont_expand(BTRFS_I(inode), oldsize, end_pos); - if (ret) { - current->backing_dev_info = NULL; + if (ret) return ret; - } } return 0; @@ -1689,7 +1686,6 @@ ssize_t btrfs_do_write_iter(struct kiocb *iocb, struct iov_iter *from, if (sync) atomic_dec(&inode->sync_writers); - current->backing_dev_info = NULL; return num_written; } diff --git a/fs/ceph/file.c b/fs/ceph/file.c index f4d8bf7dec88..c8ef72f723ba 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1791,9 +1791,6 @@ retry_snap: else ceph_start_io_write(inode); - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - if (iocb->ki_flags & IOCB_APPEND) { err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false); if (err < 0) @@ -1940,7 +1937,6 @@ out: ceph_end_io_write(inode); out_unlocked: ceph_free_cap_flush(prealloc_cf); - current->backing_dev_info = NULL; return written ? 
written : err; } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index d101b3b0c7da..bc430270c23c 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -285,9 +285,7 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, if (ret <= 0) goto out; - current->backing_dev_info = inode_to_bdi(inode); ret = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; out: inode_unlock(inode); diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 5ac53d2627d2..4f423d367a44 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4517,9 +4517,7 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, if (iocb->ki_flags & IOCB_NOWAIT) return -EOPNOTSUPP; - current->backing_dev_info = inode_to_bdi(inode); ret = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; if (ret > 0) { iocb->ki_pos += ret; diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 89d97f6188e0..97d435874b14 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1362,9 +1362,6 @@ static ssize_t fuse_cache_write_iter(struct kiocb *iocb, struct iov_iter *from) writethrough: inode_lock(inode); - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - err = generic_write_checks(iocb, from); if (err <= 0) goto out; @@ -1409,7 +1406,6 @@ writethrough: iocb->ki_pos += written; } out: - current->backing_dev_info = NULL; inode_unlock(inode); if (written > 0) written = generic_write_sync(iocb, written); diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index 300844f50dcd..904a0d6ac1a1 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1041,11 +1041,9 @@ retry: goto out_unlock; } - current->backing_dev_info = inode_to_bdi(inode); pagefault_disable(); ret = iomap_file_buffered_write(iocb, from, &gfs2_iomap_ops); pagefault_enable(); - current->backing_dev_info = NULL; if (ret > 0) { iocb->ki_pos += ret; written += ret; diff --git a/fs/nfs/file.c b/fs/nfs/file.c index f0edf5a36237..665ce3fc62ea 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -648,11 +648,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) since = filemap_sample_wb_err(file->f_mapping); nfs_start_io_write(inode); result = generic_write_checks(iocb, from); - if (result > 0) { - current->backing_dev_info = inode_to_bdi(inode); + if (result > 0) result = generic_perform_write(iocb, from); - current->backing_dev_info = NULL; - } nfs_end_io_write(inode); if (result <= 0) goto out; diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c index c481b14e4fd9..e296f804a9c4 100644 --- a/fs/ntfs/file.c +++ b/fs/ntfs/file.c @@ -1911,11 +1911,9 @@ static ssize_t ntfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from) inode_lock(vi); /* We can write back this queue in page reclaim. 
*/ - current->backing_dev_info = inode_to_bdi(vi); err = ntfs_prepare_file_for_write(iocb, from); if (iov_iter_count(from) && !err) written = ntfs_perform_write(file, from, iocb->ki_pos); - current->backing_dev_info = NULL; inode_unlock(vi); iocb->ki_pos += written; if (likely(written > 0)) diff --git a/fs/ntfs3/file.c b/fs/ntfs3/file.c index 9a3d55c367d9..86d16a2c8339 100644 --- a/fs/ntfs3/file.c +++ b/fs/ntfs3/file.c @@ -820,7 +820,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) if (!pages) return -ENOMEM; - current->backing_dev_info = inode_to_bdi(inode); err = file_remove_privs(file); if (err) goto out; @@ -993,8 +992,6 @@ static ssize_t ntfs_compress_write(struct kiocb *iocb, struct iov_iter *from) out: kfree(pages); - current->backing_dev_info = NULL; - if (err < 0) return err; diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index aede746541f8..431c3fd0e2b5 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -717,9 +717,6 @@ write_retry: if (ret) goto out; - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); - trace_xfs_file_buffered_write(iocb, from); ret = iomap_file_buffered_write(iocb, from, &xfs_buffered_write_iomap_ops); @@ -753,7 +750,6 @@ write_retry: goto write_retry; } - current->backing_dev_info = NULL; out: if (iolock) xfs_iunlock(ip, iolock); diff --git a/include/linux/sched.h b/include/linux/sched.h index eed5d65b8d1f..54780571fe9a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -41,7 +41,6 @@ /* task_struct member predeclarations (sorted alphabetically): */ struct audit_context; -struct backing_dev_info; struct bio_list; struct blk_plug; struct bpf_local_storage; @@ -1186,8 +1185,6 @@ struct task_struct { /* VM state: */ struct reclaim_state *reclaim_state; - struct backing_dev_info *backing_dev_info; - struct io_context *io_context; #ifdef CONFIG_COMPACTION diff --git a/mm/filemap.c b/mm/filemap.c index 570bc8c3db87..0d371ed91a68 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3964,8 +3964,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) ssize_t err; ssize_t status; - /* We can write back this queue in page reclaim */ - current->backing_dev_info = inode_to_bdi(inode); err = file_remove_privs(file); if (err) goto out; @@ -4026,7 +4024,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) iocb->ki_pos += written; } out: - current->backing_dev_info = NULL; return written ? written : err; } EXPORT_SYMBOL(__generic_file_write_iter); -- cgit v1.2.3 From 182c25e9c157f37bd0ab5a82fe2417e2223df459 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:55 +0200 Subject: filemap: update ki_pos in generic_perform_write All callers of generic_perform_write need to updated ki_pos, move it into common code. Link: https://lkml.kernel.org/r/20230601145904.1385409-4-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Xiubo Li Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Theodore Ts'o Acked-by: Darrick J. 
Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Trond Myklebust Signed-off-by: Andrew Morton --- fs/ceph/file.c | 2 -- fs/ext4/file.c | 9 +++------ fs/f2fs/file.c | 1 - fs/nfs/file.c | 1 - mm/filemap.c | 8 ++++---- 5 files changed, 7 insertions(+), 14 deletions(-) (limited to 'mm') diff --git a/fs/ceph/file.c b/fs/ceph/file.c index c8ef72f723ba..767f4dfe7def 100644 --- a/fs/ceph/file.c +++ b/fs/ceph/file.c @@ -1891,8 +1891,6 @@ retry_snap: * can not run at the same time */ written = generic_perform_write(iocb, from); - if (likely(written >= 0)) - iocb->ki_pos = pos + written; ceph_end_io_write(inode); } diff --git a/fs/ext4/file.c b/fs/ext4/file.c index bc430270c23c..ea0ada3985cb 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -289,12 +289,9 @@ static ssize_t ext4_buffered_write_iter(struct kiocb *iocb, out: inode_unlock(inode); - if (likely(ret > 0)) { - iocb->ki_pos += ret; - ret = generic_write_sync(iocb, ret); - } - - return ret; + if (unlikely(ret <= 0)) + return ret; + return generic_write_sync(iocb, ret); } static ssize_t ext4_handle_inode_extension(struct inode *inode, loff_t offset, diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c index 4f423d367a44..7134fe8bd008 100644 --- a/fs/f2fs/file.c +++ b/fs/f2fs/file.c @@ -4520,7 +4520,6 @@ static ssize_t f2fs_buffered_write_iter(struct kiocb *iocb, ret = generic_perform_write(iocb, from); if (ret > 0) { - iocb->ki_pos += ret; f2fs_update_iostat(F2FS_I_SB(inode), inode, APP_BUFFERED_IO, ret); } diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 665ce3fc62ea..e8bb4c48a321 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c @@ -655,7 +655,6 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from) goto out; written = result; - iocb->ki_pos += written; nfs_add_stats(inode, NFSIOS_NORMALWRITTENBYTES, written); if (mntflags & NFS_MOUNT_WRITE_EAGER) { diff --git a/mm/filemap.c b/mm/filemap.c index 0d371ed91a68..3a80a69fa9fa 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3930,7 +3930,10 @@ again: balance_dirty_pages_ratelimited(mapping); } while (iov_iter_count(i)); - return written ? written : status; + if (!written) + return status; + iocb->ki_pos += written; + return written; } EXPORT_SYMBOL(generic_perform_write); @@ -4007,7 +4010,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) endbyte = pos + status - 1; err = filemap_write_and_wait_range(mapping, pos, endbyte); if (err == 0) { - iocb->ki_pos = endbyte + 1; written += status; invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, @@ -4020,8 +4022,6 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) } } else { written = generic_perform_write(iocb, from); - if (likely(written > 0)) - iocb->ki_pos += written; } out: return written ? written : err; -- cgit v1.2.3 From 3c435a0fe35c220bec442dffff04a64aacf952b0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:56 +0200 Subject: filemap: add a kiocb_write_and_wait helper Factor out a helper that does filemap_write_and_wait_range for the range covered by a read kiocb, or returns -EAGAIN if the kiocb is marked as nowait and there would be pages to write. Link: https://lkml.kernel.org/r/20230601145904.1385409-5-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. 
Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- block/fops.c | 18 +++--------------- include/linux/pagemap.h | 2 ++ mm/filemap.c | 30 ++++++++++++++++++------------ 3 files changed, 23 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/block/fops.c b/block/fops.c index 58d0aebc7313..575171049c5d 100644 --- a/block/fops.c +++ b/block/fops.c @@ -576,21 +576,9 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) goto reexpand; /* skip atime */ if (iocb->ki_flags & IOCB_DIRECT) { - struct address_space *mapping = iocb->ki_filp->f_mapping; - - if (iocb->ki_flags & IOCB_NOWAIT) { - if (filemap_range_needs_writeback(mapping, pos, - pos + count - 1)) { - ret = -EAGAIN; - goto reexpand; - } - } else { - ret = filemap_write_and_wait_range(mapping, pos, - pos + count - 1); - if (ret < 0) - goto reexpand; - } - + ret = kiocb_write_and_wait(iocb, count); + if (ret < 0) + goto reexpand; file_accessed(iocb->ki_filp); ret = blkdev_direct_IO(iocb, to); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index c1ae5ebc375f..b6a12ca108b7 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -30,6 +30,7 @@ static inline void invalidate_remote_inode(struct inode *inode) int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); + int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); int filemap_flush(struct address_space *); @@ -54,6 +55,7 @@ int filemap_check_errors(struct address_space *mapping); void __filemap_set_wb_err(struct address_space *mapping, int err); int filemap_fdatawrite_wbc(struct address_space *mapping, struct writeback_control *wbc); +int kiocb_write_and_wait(struct kiocb *iocb, size_t count); static inline int filemap_write_and_wait(struct address_space *mapping) { diff --git a/mm/filemap.c b/mm/filemap.c index 3a80a69fa9fa..5566e10ca1a7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2735,6 +2735,21 @@ put_folios: } EXPORT_SYMBOL_GPL(filemap_read); +int kiocb_write_and_wait(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos; + loff_t end = pos + count - 1; + + if (iocb->ki_flags & IOCB_NOWAIT) { + if (filemap_range_needs_writeback(mapping, pos, end)) + return -EAGAIN; + return 0; + } + + return filemap_write_and_wait_range(mapping, pos, end); +} + /** * generic_file_read_iter - generic filesystem read routine * @iocb: kernel I/O control block @@ -2770,18 +2785,9 @@ generic_file_read_iter(struct kiocb *iocb, struct iov_iter *iter) struct address_space *mapping = file->f_mapping; struct inode *inode = mapping->host; - if (iocb->ki_flags & IOCB_NOWAIT) { - if (filemap_range_needs_writeback(mapping, iocb->ki_pos, - iocb->ki_pos + count - 1)) - return -EAGAIN; - } else { - retval = filemap_write_and_wait_range(mapping, - iocb->ki_pos, - iocb->ki_pos + count - 1); - if (retval < 0) - return retval; - } - + retval = kiocb_write_and_wait(iocb, count); + if (retval < 0) + return retval; file_accessed(file); retval = mapping->a_ops->direct_IO(iocb, iter); -- cgit v1.2.3 From e003f74afbd2feadbb9ffbf9135e2d2fb5d320a5 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 
2023 16:58:57 +0200 Subject: filemap: add a kiocb_invalidate_pages helper Factor out a helper that calls filemap_write_and_wait_range and invalidate_inode_pages2_range for the range covered by a write kiocb or returns -EAGAIN if the kiocb is marked as nowait and there would be pages to write or invalidate. Link: https://lkml.kernel.org/r/20230601145904.1385409-6-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- include/linux/pagemap.h | 1 + mm/filemap.c | 48 ++++++++++++++++++++++++++++-------------------- 2 files changed, 29 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index b6a12ca108b7..7b66a67dba51 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -30,6 +30,7 @@ static inline void invalidate_remote_inode(struct inode *inode) int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); +int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); diff --git a/mm/filemap.c b/mm/filemap.c index 5566e10ca1a7..6ba6233c4bbb 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2750,6 +2750,33 @@ int kiocb_write_and_wait(struct kiocb *iocb, size_t count) return filemap_write_and_wait_range(mapping, pos, end); } +int kiocb_invalidate_pages(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos; + loff_t end = pos + count - 1; + int ret; + + if (iocb->ki_flags & IOCB_NOWAIT) { + /* we could block if there are any pages in the range */ + if (filemap_range_has_page(mapping, pos, end)) + return -EAGAIN; + } else { + ret = filemap_write_and_wait_range(mapping, pos, end); + if (ret) + return ret; + } + + /* + * After a write we want buffered reads to be sure to go to disk to get + * the new data. We invalidate clean cached page from the region we're + * about to write. We do this *before* the write so that we can return + * without clobbering -EIOCBQUEUED from ->direct_IO(). + */ + return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, + end >> PAGE_SHIFT); +} + /** * generic_file_read_iter - generic filesystem read routine * @iocb: kernel I/O control block @@ -3793,30 +3820,11 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) write_len = iov_iter_count(from); end = (pos + write_len - 1) >> PAGE_SHIFT; - if (iocb->ki_flags & IOCB_NOWAIT) { - /* If there are pages to writeback, return */ - if (filemap_range_has_page(file->f_mapping, pos, - pos + write_len - 1)) - return -EAGAIN; - } else { - written = filemap_write_and_wait_range(mapping, pos, - pos + write_len - 1); - if (written) - goto out; - } - - /* - * After a write we want buffered reads to be sure to go to disk to get - * the new data. We invalidate clean cached page from the region we're - * about to write. We do this *before* the write so that we can return - * without clobbering -EIOCBQUEUED from ->direct_IO(). 
- */ - written = invalidate_inode_pages2_range(mapping, - pos >> PAGE_SHIFT, end); /* * If a page can not be invalidated, return 0 to fall back * to buffered write. */ + written = kiocb_invalidate_pages(iocb, write_len); if (written) { if (written == -EBUSY) return 0; -- cgit v1.2.3 From c402a9a9430b670926decbb284b756ee6f47c1ec Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:58:58 +0200 Subject: filemap: add a kiocb_invalidate_post_direct_write helper Add a helper to invalidate page cache after a dio write. Link: https://lkml.kernel.org/r/20230601145904.1385409-7-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Hannes Reinecke Acked-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/direct-io.c | 10 ++-------- fs/iomap/direct-io.c | 12 ++---------- include/linux/fs.h | 5 ----- include/linux/pagemap.h | 1 + mm/filemap.c | 37 ++++++++++++++++++++----------------- 5 files changed, 25 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/fs/direct-io.c b/fs/direct-io.c index 0b380bb8a81e..4f9069aee0fe 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c @@ -285,14 +285,8 @@ static ssize_t dio_complete(struct dio *dio, ssize_t ret, unsigned int flags) * zeros from unwritten extents. */ if (flags & DIO_COMPLETE_INVALIDATE && - ret > 0 && dio_op == REQ_OP_WRITE && - dio->inode->i_mapping->nrpages) { - err = invalidate_inode_pages2_range(dio->inode->i_mapping, - offset >> PAGE_SHIFT, - (offset + ret - 1) >> PAGE_SHIFT); - if (err) - dio_warn_stale_pagecache(dio->iocb->ki_filp); - } + ret > 0 && dio_op == REQ_OP_WRITE) + kiocb_invalidate_post_direct_write(dio->iocb, ret); inode_dio_end(dio->inode); diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c index 6207a59d2162..0795c54a745b 100644 --- a/fs/iomap/direct-io.c +++ b/fs/iomap/direct-io.c @@ -81,7 +81,6 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio) { const struct iomap_dio_ops *dops = dio->dops; struct kiocb *iocb = dio->iocb; - struct inode *inode = file_inode(iocb->ki_filp); loff_t offset = iocb->ki_pos; ssize_t ret = dio->error; @@ -108,15 +107,8 @@ ssize_t iomap_dio_complete(struct iomap_dio *dio) * ->end_io() when necessary, otherwise a racing buffer read would cache * zeros from unwritten extents. */ - if (!dio->error && dio->size && - (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) { - int err; - err = invalidate_inode_pages2_range(inode->i_mapping, - offset >> PAGE_SHIFT, - (offset + dio->size - 1) >> PAGE_SHIFT); - if (err) - dio_warn_stale_pagecache(iocb->ki_filp); - } + if (!dio->error && dio->size && (dio->flags & IOMAP_DIO_WRITE)) + kiocb_invalidate_post_direct_write(iocb, dio->size); inode_dio_end(file_inode(iocb->ki_filp)); diff --git a/include/linux/fs.h b/include/linux/fs.h index 86b50271b4f7..4f196f827d9d 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2843,11 +2843,6 @@ static inline void inode_dio_end(struct inode *inode) wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); } -/* - * Warn about a page cache invalidation failure diring a direct I/O write. 
- */ -void dio_warn_stale_pagecache(struct file *filp); - extern void inode_set_flags(struct inode *inode, unsigned int flags, unsigned int mask); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 7b66a67dba51..716953ee1ebd 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -31,6 +31,7 @@ int invalidate_inode_pages2(struct address_space *mapping); int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); +void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); diff --git a/mm/filemap.c b/mm/filemap.c index 6ba6233c4bbb..b45506f74133 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3789,7 +3789,7 @@ EXPORT_SYMBOL(read_cache_page_gfp); /* * Warn about a page cache invalidation failure during a direct I/O write. */ -void dio_warn_stale_pagecache(struct file *filp) +static void dio_warn_stale_pagecache(struct file *filp) { static DEFINE_RATELIMIT_STATE(_rs, 86400 * HZ, DEFAULT_RATELIMIT_BURST); char pathname[128]; @@ -3806,19 +3806,23 @@ void dio_warn_stale_pagecache(struct file *filp) } } +void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + + if (mapping->nrpages && + invalidate_inode_pages2_range(mapping, + iocb->ki_pos >> PAGE_SHIFT, + (iocb->ki_pos + count - 1) >> PAGE_SHIFT)) + dio_warn_stale_pagecache(iocb->ki_filp); +} + ssize_t generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) { - struct file *file = iocb->ki_filp; - struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - loff_t pos = iocb->ki_pos; - ssize_t written; - size_t write_len; - pgoff_t end; - - write_len = iov_iter_count(from); - end = (pos + write_len - 1) >> PAGE_SHIFT; + struct address_space *mapping = iocb->ki_filp->f_mapping; + size_t write_len = iov_iter_count(from); + ssize_t written; /* * If a page can not be invalidated, return 0 to fall back @@ -3828,7 +3832,7 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) if (written) { if (written == -EBUSY) return 0; - goto out; + return written; } written = mapping->a_ops->direct_IO(iocb, from); @@ -3850,11 +3854,11 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) * * Skip invalidation for async writes or if mapping has no pages. */ - if (written > 0 && mapping->nrpages && - invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) - dio_warn_stale_pagecache(file); - if (written > 0) { + struct inode *inode = mapping->host; + loff_t pos = iocb->ki_pos; + + kiocb_invalidate_post_direct_write(iocb, written); pos += written; write_len -= written; if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) { @@ -3865,7 +3869,6 @@ generic_file_direct_write(struct kiocb *iocb, struct iov_iter *from) } if (written != -EIOCBQUEUED) iov_iter_revert(from, write_len - iov_iter_count(from)); -out: return written; } EXPORT_SYMBOL(generic_file_direct_write); -- cgit v1.2.3 From 44fff0fa08ec5a6d9d5fb05443a36d854d0ece4d Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Thu, 1 Jun 2023 16:59:01 +0200 Subject: fs: factor out a direct_write_fallback helper Add a helper dealing with handling the syncing of a buffered write fallback for direct I/O. 
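The intended calling convention, in sketch form (this mirrors the __generic_file_write_iter() conversion at the end of the diff below; a filesystem with its own buffered-write path would substitute it for generic_perform_write()):

	ret = generic_file_direct_write(iocb, from);
	if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode))
		return ret;
	/*
	 * Short direct write: finish the rest buffered, then let the helper
	 * write back and invalidate the affected page cache range and pick
	 * the right return value.
	 */
	return direct_write_fallback(iocb, from, ret,
				     generic_perform_write(iocb, from));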
Link: https://lkml.kernel.org/r/20230601145904.1385409-10-hch@lst.de Signed-off-by: Christoph Hellwig Reviewed-by: Damien Le Moal Reviewed-by: Miklos Szeredi Reviewed-by: Darrick J. Wong Cc: Al Viro Cc: Andreas Gruenbacher Cc: Anna Schumaker Cc: Chao Yu Cc: Christian Brauner Cc: Hannes Reinecke Cc: Ilya Dryomov Cc: Jaegeuk Kim Cc: Jens Axboe Cc: Johannes Thumshirn Cc: Matthew Wilcox Cc: Miklos Szeredi Cc: Theodore Ts'o Cc: Trond Myklebust Cc: Xiubo Li Signed-off-by: Andrew Morton --- fs/libfs.c | 41 +++++++++++++++++++++++++++++++++ include/linux/fs.h | 2 ++ mm/filemap.c | 66 +++++++++++++----------------------------------------- 3 files changed, 58 insertions(+), 51 deletions(-) (limited to 'mm') diff --git a/fs/libfs.c b/fs/libfs.c index 89cf614a3271..5b851315eeed 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1613,3 +1613,44 @@ u64 inode_query_iversion(struct inode *inode) return cur >> I_VERSION_QUERIED_SHIFT; } EXPORT_SYMBOL(inode_query_iversion); + +ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter, + ssize_t direct_written, ssize_t buffered_written) +{ + struct address_space *mapping = iocb->ki_filp->f_mapping; + loff_t pos = iocb->ki_pos - buffered_written; + loff_t end = iocb->ki_pos - 1; + int err; + + /* + * If the buffered write fallback returned an error, we want to return + * the number of bytes which were written by direct I/O, or the error + * code if that was zero. + * + * Note that this differs from normal direct-io semantics, which will + * return -EFOO even if some bytes were written. + */ + if (unlikely(buffered_written < 0)) { + if (direct_written) + return direct_written; + return buffered_written; + } + + /* + * We need to ensure that the page cache pages are written to disk and + * invalidated to preserve the expected O_DIRECT semantics. 
+ */ + err = filemap_write_and_wait_range(mapping, pos, end); + if (err < 0) { + /* + * We don't know how much we wrote, so just return the number of + * bytes which were direct-written + */ + if (direct_written) + return direct_written; + return err; + } + invalidate_mapping_pages(mapping, pos >> PAGE_SHIFT, end >> PAGE_SHIFT); + return direct_written + buffered_written; +} +EXPORT_SYMBOL_GPL(direct_write_fallback); diff --git a/include/linux/fs.h b/include/linux/fs.h index 4f196f827d9d..c363f8687c7e 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -2744,6 +2744,8 @@ extern ssize_t __generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_write_iter(struct kiocb *, struct iov_iter *); extern ssize_t generic_file_direct_write(struct kiocb *, struct iov_iter *); ssize_t generic_perform_write(struct kiocb *, struct iov_iter *); +ssize_t direct_write_fallback(struct kiocb *iocb, struct iov_iter *iter, + ssize_t direct_written, ssize_t buffered_written); ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos, rwf_t flags); diff --git a/mm/filemap.c b/mm/filemap.c index b45506f74133..916b7c6444fe 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3979,23 +3979,19 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct address_space *mapping = file->f_mapping; - struct inode *inode = mapping->host; - ssize_t written = 0; - ssize_t err; - ssize_t status; + struct inode *inode = mapping->host; + ssize_t ret; - err = file_remove_privs(file); - if (err) - goto out; + ret = file_remove_privs(file); + if (ret) + return ret; - err = file_update_time(file); - if (err) - goto out; + ret = file_update_time(file); + if (ret) + return ret; if (iocb->ki_flags & IOCB_DIRECT) { - loff_t pos, endbyte; - - written = generic_file_direct_write(iocb, from); + ret = generic_file_direct_write(iocb, from); /* * If the write stopped short of completing, fall back to * buffered writes. Some filesystems do this for writes to @@ -4003,45 +3999,13 @@ ssize_t __generic_file_write_iter(struct kiocb *iocb, struct iov_iter *from) * not succeed (even if it did, DAX does not handle dirty * page-cache pages correctly). */ - if (written < 0 || !iov_iter_count(from) || IS_DAX(inode)) - goto out; - - pos = iocb->ki_pos; - status = generic_perform_write(iocb, from); - /* - * If generic_perform_write() returned a synchronous error - * then we want to return the number of bytes which were - * direct-written, or the error code if that was zero. Note - * that this differs from normal direct-io semantics, which - * will return -EFOO even if some bytes were written. - */ - if (unlikely(status < 0)) { - err = status; - goto out; - } - /* - * We need to ensure that the page cache pages are written to - * disk and invalidated to preserve the expected O_DIRECT - * semantics. - */ - endbyte = pos + status - 1; - err = filemap_write_and_wait_range(mapping, pos, endbyte); - if (err == 0) { - written += status; - invalidate_mapping_pages(mapping, - pos >> PAGE_SHIFT, - endbyte >> PAGE_SHIFT); - } else { - /* - * We don't know how much we wrote, so just return - * the number of bytes which were direct-written - */ - } - } else { - written = generic_perform_write(iocb, from); + if (ret < 0 || !iov_iter_count(from) || IS_DAX(inode)) + return ret; + return direct_write_fallback(iocb, from, ret, + generic_perform_write(iocb, from)); } -out: - return written ? 
written : err; + + return generic_perform_write(iocb, from); } EXPORT_SYMBOL(__generic_file_write_iter); -- cgit v1.2.3 From 501b26510ae3bbdf9333b83addcd4e5c4214346d Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Tue, 30 May 2023 11:52:35 -0300 Subject: vmstat: allow_direct_reclaim should use zone_page_state_snapshot A customer provided evidence indicating that a process was stalled in direct reclaim: - The process was trapped in throttle_direct_reclaim(). The function wait_event_killable() was called to wait condition allow_direct_reclaim(pgdat) for current node to be true. The allow_direct_reclaim(pgdat) examined the number of free pages on the node by zone_page_state() which just returns value in zone->vm_stat[NR_FREE_PAGES]. - On node #1, zone->vm_stat[NR_FREE_PAGES] was 0. However, the freelist on this node was not empty. - This inconsistent of vmstat value was caused by percpu vmstat on nohz_full cpus. Every increment/decrement of vmstat is performed on percpu vmstat counter at first, then pooled diffs are cumulated to the zone's vmstat counter in timely manner. However, on nohz_full cpus (in case of this customer's system, 48 of 52 cpus) these pooled diffs were not cumulated once the cpu had no event on it so that the cpu started sleeping infinitely. I checked percpu vmstat and found there were total 69 counts not cumulated to the zone's vmstat counter yet. - In this situation, kswapd did not help the trapped process. In pgdat_balanced(), zone_wakermark_ok_safe() examined the number of free pages on the node by zone_page_state_snapshot() which checks pending counts on percpu vmstat. Therefore kswapd could know there were 69 free pages correctly. Since zone->_watermark = {8, 20, 32}, kswapd did not work because 69 was greater than 32 as high watermark. Change allow_direct_reclaim to use zone_page_state_snapshot, which allows a more precise version of the vmstat counters to be used. allow_direct_reclaim will only be called from try_to_free_pages, which is not a hot path. Testing: Due to difficulties accessing the system, it has not been possible for the reproducer to test the patch (however its clear from available data and analysis that it should fix it). Link: https://lkml.kernel.org/r/20230530145335.677325196@redhat.com Reviewed-by: Michal Hocko Reviewed-by: Aaron Tomlin Signed-off-by: Marcelo Tosatti Cc: Christoph Lameter Cc: Frederic Weisbecker Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmscan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 3024b4619b65..5993c0bae162 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6898,7 +6898,7 @@ static bool allow_direct_reclaim(pg_data_t *pgdat) continue; pfmemalloc_reserve += min_wmark_pages(zone); - free_pages += zone_page_state(zone, NR_FREE_PAGES); + free_pages += zone_page_state_snapshot(zone, NR_FREE_PAGES); } /* If there are no reserves (unexpected config) then do not throttle */ -- cgit v1.2.3 From b3f78e74986546a6da5d28c24e627d95d17f79ec Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:46 +0100 Subject: mm: vmalloc must set pte via arch code Patch series "Fixes for pte encapsulation bypasses", v3. A series to improve the encapsulation of pte entries by disallowing non-arch code from directly dereferencing pte_t pointers. This patch (of 4): It is bad practice to directly set pte entries within a pte table. 
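As a hedged illustration (the identifiers here are made up; the real conversion is in vmap_pfn_apply() in the diff below), the difference between storing the entry directly and going through the arch helper looks like this:

        #include <linux/mm.h>
        #include <linux/pgtable.h>

        static void example_install_pte(pte_t *pte, unsigned long addr,
                                        unsigned long pfn, pgprot_t prot)
        {
                pte_t ptent = pte_mkspecial(pfn_pte(pfn, prot));

                /* Bad, bypasses the architecture:  *pte = ptent;  */

                /* Good: the arch helper sees, and may adjust, the update. */
                set_pte_at(&init_mm, addr, pte, ptent);
        }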
Instead all modifications must go through arch-provided helpers such as set_pte_at() to give the arch code visibility and allow it to check (and potentially modify) the operation. Link: https://lkml.kernel.org/r/20230602092949.545577-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230602092949.545577-2-ryan.roberts@arm.com Fixes: 3e9a9e256b1e ("mm: add a vmap_pfn function") Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Acked-by: Lorenzo Stoakes Reviewed-by: Christoph Hellwig Reviewed-by: Uladzislau Rezki (Sony) Reviewed-by: Mike Rapoport (IBM) Cc: Kirill A. Shutemov Cc: Matthew Wilcox (Oracle) Cc: SeongJae Park Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/vmalloc.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7c32435219b1..9d64a4098c36 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -2944,10 +2944,16 @@ struct vmap_pfn_data { static int vmap_pfn_apply(pte_t *pte, unsigned long addr, void *private) { struct vmap_pfn_data *data = private; + unsigned long pfn = data->pfns[data->idx]; + pte_t ptent; - if (WARN_ON_ONCE(pfn_valid(data->pfns[data->idx]))) + if (WARN_ON_ONCE(pfn_valid(pfn))) return -EINVAL; - *pte = pte_mkspecial(pfn_pte(data->pfns[data->idx++], data->prot)); + + ptent = pte_mkspecial(pfn_pte(pfn, data->prot)); + set_pte_at(&init_mm, addr, pte, ptent); + + data->idx++; return 0; } -- cgit v1.2.3 From c11d34fa139e4b0fb4249a30f37b178353533fa1 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:47 +0100 Subject: mm/damon/ops-common: atomically test and clear young on ptes and pmds It is racy to non-atomically read a pte, then clear the young bit, then write it back as this could discard dirty information. Further, it is bad practice to directly set a pte entry within a table. Instead clearing young must go through the arch-provided helper, ptep_test_and_clear_young() to ensure it is modified atomically and to give the arch code visibility and allow it to check (and potentially modify) the operation. Link: https://lkml.kernel.org/r/20230602092949.545577-3-ryan.roberts@arm.com Fixes: 3f49584b262c ("mm/damon: implement primitives for the virtual memory address spaces"). Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Reviewed-by: SeongJae Park Reviewed-by: Mike Rapoport (IBM) Cc: Christoph Hellwig Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Uladzislau Rezki (Sony) Cc: Yu Zhao Cc: Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 16 ++++++---------- mm/damon/ops-common.h | 4 ++-- mm/damon/paddr.c | 4 ++-- mm/damon/vaddr.c | 4 ++-- 4 files changed, 12 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index cc63cf953636..acc264b97903 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -37,7 +37,7 @@ struct folio *damon_get_folio(unsigned long pfn) return folio; } -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) +void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { bool referenced = false; struct folio *folio = damon_get_folio(pte_pfn(*pte)); @@ -45,13 +45,11 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) if (!folio) return; - if (pte_young(*pte)) { + if (ptep_test_and_clear_young(vma, addr, pte)) referenced = true; - *pte = pte_mkold(*pte); - } #ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, addr + PAGE_SIZE)) + if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE)) referenced = true; #endif /* CONFIG_MMU_NOTIFIER */ @@ -62,7 +60,7 @@ void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr) folio_put(folio); } -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) +void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE bool referenced = false; @@ -71,13 +69,11 @@ void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr) if (!folio) return; - if (pmd_young(*pmd)) { + if (pmdp_test_and_clear_young(vma, addr, pmd)) referenced = true; - *pmd = pmd_mkold(*pmd); - } #ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(mm, addr, addr + HPAGE_PMD_SIZE)) + if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE)) referenced = true; #endif /* CONFIG_MMU_NOTIFIER */ diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h index 14f4bc69f29b..18d837d11bce 100644 --- a/mm/damon/ops-common.h +++ b/mm/damon/ops-common.h @@ -9,8 +9,8 @@ struct folio *damon_get_folio(unsigned long pfn); -void damon_ptep_mkold(pte_t *pte, struct mm_struct *mm, unsigned long addr); -void damon_pmdp_mkold(pmd_t *pmd, struct mm_struct *mm, unsigned long addr); +void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr); +void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr); int damon_cold_score(struct damon_ctx *c, struct damon_region *r, struct damos *s); diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 467b99166b43..5b3a3463d078 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -24,9 +24,9 @@ static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma, while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) - damon_ptep_mkold(pvmw.pte, vma->vm_mm, addr); + damon_ptep_mkold(pvmw.pte, vma, addr); else - damon_pmdp_mkold(pvmw.pmd, vma->vm_mm, addr); + damon_pmdp_mkold(pvmw.pmd, vma, addr); } return true; } diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 1fec16d7263e..37994fb6120c 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -311,7 +311,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, } if (pmd_trans_huge(*pmd)) { - damon_pmdp_mkold(pmd, walk->mm, addr); + damon_pmdp_mkold(pmd, walk->vma, addr); spin_unlock(ptl); return 0; } @@ -323,7 
+323,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte_present(*pte)) goto out; - damon_ptep_mkold(pte, walk->mm, addr); + damon_ptep_mkold(pte, walk->vma, addr); out: pte_unmap_unlock(pte, ptl); return 0; -- cgit v1.2.3 From fa8c919dac3f5f325b17f9fcf8ac7dd899992598 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:48 +0100 Subject: mm/damon/ops-common: refactor to use {pte|pmd}p_clear_young_notify() With the fix in place to atomically test and clear young on ptes and pmds, simplify the code to handle the clearing for both the primary mmu and the mmu notifier with a single API call. Link: https://lkml.kernel.org/r/20230602092949.545577-4-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Acked-by: Yu Zhao Reviewed-by: SeongJae Park Cc: Christoph Hellwig Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: Mike Rapoport (IBM) Cc: Uladzislau Rezki (Sony) Cc: Zi Yan Signed-off-by: Andrew Morton --- mm/damon/ops-common.c | 22 ++-------------------- 1 file changed, 2 insertions(+), 20 deletions(-) (limited to 'mm') diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index acc264b97903..d4ab81229136 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -39,21 +39,12 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - bool referenced = false; struct folio *folio = damon_get_folio(pte_pfn(*pte)); if (!folio) return; - if (ptep_test_and_clear_young(vma, addr, pte)) - referenced = true; - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + PAGE_SIZE)) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) + if (ptep_clear_young_notify(vma, addr, pte)) folio_set_young(folio); folio_set_idle(folio); @@ -63,21 +54,12 @@ void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr void damon_pmdp_mkold(pmd_t *pmd, struct vm_area_struct *vma, unsigned long addr) { #ifdef CONFIG_TRANSPARENT_HUGEPAGE - bool referenced = false; struct folio *folio = damon_get_folio(pmd_pfn(*pmd)); if (!folio) return; - if (pmdp_test_and_clear_young(vma, addr, pmd)) - referenced = true; - -#ifdef CONFIG_MMU_NOTIFIER - if (mmu_notifier_clear_young(vma->vm_mm, addr, addr + HPAGE_PMD_SIZE)) - referenced = true; -#endif /* CONFIG_MMU_NOTIFIER */ - - if (referenced) + if (pmdp_clear_young_notify(vma, addr, pmd)) folio_set_young(folio); folio_set_idle(folio); -- cgit v1.2.3 From 3b65f437d9e8dd696a2b88e7afcd51385532ab35 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Fri, 2 Jun 2023 10:29:49 +0100 Subject: mm: fix failure to unmap pte on highmem systems The loser of a race to service a pte for a device private entry in the swap path previously unlocked the ptl, but failed to unmap the pte. This only affects highmem systems since unmapping a pte is a noop on non-highmem systems. Link: https://lkml.kernel.org/r/20230602092949.545577-5-ryan.roberts@arm.com Fixes: 16ce101db85d ("mm/memory.c: fix race when faulting a device private page") Signed-off-by: Ryan Roberts Reviewed-by: Zi Yan Reviewed-by: Mike Rapoport (IBM) Cc: Christoph Hellwig Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox (Oracle) Cc: SeongJae Park Cc: Uladzislau Rezki (Sony) Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/memory.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 4dd09f930c61..36082fd42df4 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3728,10 +3728,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { - spin_unlock(vmf->ptl); - goto out; - } + if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + goto unlock; /* * Get a page reference while we know the page can't be -- cgit v1.2.3 From 12dd992accd96c09ec765a3bb4706a3ed24ae5b3 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 3 Jun 2023 19:25:58 +0800 Subject: mm: page_alloc: remove unneeded header files Remove some unneeded header files. No functional change intended. Link: https://lkml.kernel.org/r/20230603112558.213694-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/page_alloc.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 091a6cb44265..6a18f2232e3e 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -26,7 +26,6 @@ #include #include #include -#include #include #include #include @@ -36,8 +35,6 @@ #include #include #include -#include -#include #include #include #include @@ -52,7 +49,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 396faf88981917f975773d8589ce4f6e6c679e13 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 3 Jun 2023 15:21:16 +0800 Subject: memcg: use helper macro FLUSH_TIME Use helper macro FLUSH_TIME to indicate the flush time to improve the readability a bit. No functional change intended. 
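For reference, FLUSH_TIME is assumed to expand to the same two-second period as the literal it replaces, along the lines of the existing definition in mm/memcontrol.c:

        #define FLUSH_TIME (2UL*HZ)  /* assumed; matches the open-coded 2UL*HZ removed below */

Using the macro keeps this call site visibly in step with the periodic stats flush work that presumably uses the same interval.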
Link: https://lkml.kernel.org/r/20230603072116.1101690-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Shakeel Butt Reviewed-by: Muchun Song Reviewed-by: David Hildenbrand Acked-by: Roman Gushchin Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 6ee433be4c3b..caf6ab55f8e3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5428,7 +5428,7 @@ static int mem_cgroup_css_online(struct cgroup_subsys_state *css) if (unlikely(mem_cgroup_is_root(memcg))) queue_delayed_work(system_unbound_wq, &stats_flush_dwork, - 2UL*HZ); + FLUSH_TIME); lru_gen_online_memcg(memcg); return 0; offline_kmem: -- cgit v1.2.3 From 5e07472583840308949ce807b11274de15cb79a0 Mon Sep 17 00:00:00 2001 From: Liam Ni Date: Sat, 3 Jun 2023 10:31:16 +0800 Subject: mm/early_ioremap.c: improve the execution efficiency of early_ioremap_setup() Reduce the number of invalid loops of the function early_ioremap_setup() to improve the efficiency of function execution Link: https://lkml.kernel.org/r/CACZJ9cU6t5sLoDwE6_XOg+UJLpZt4+qHfjYN2bA0s+3y9y6pQQ@mail.gmail.com Signed-off-by: LiamNi Signed-off-by: Andrew Morton --- mm/early_ioremap.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c index 9bc12e526ed0..ce06b2884789 100644 --- a/mm/early_ioremap.c +++ b/mm/early_ioremap.c @@ -72,12 +72,10 @@ void __init early_ioremap_setup(void) { int i; - for (i = 0; i < FIX_BTMAPS_SLOTS; i++) - if (WARN_ON(prev_map[i])) - break; - - for (i = 0; i < FIX_BTMAPS_SLOTS; i++) + for (i = 0; i < FIX_BTMAPS_SLOTS; i++) { + WARN_ON_ONCE(prev_map[i]); slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); + } } static int __init check_early_ioremap_leak(void) -- cgit v1.2.3 From 54cbbbf3faf610fb4eba6f8d39d933bcbfc6f4de Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:51 +0100 Subject: mm/mmap: separate writenotify and dirty tracking logic MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm/gup: disallow GUP writing to file-backed mappings by default", v9. Writing to file-backed mappings which require folio dirty tracking using GUP is a fundamentally broken operation, as kernel write access to GUP mappings do not adhere to the semantics expected by a file system. A GUP caller uses the direct mapping to access the folio, which does not cause write notify to trigger, nor does it enforce that the caller marks the folio dirty. The problem arises when, after an initial write to the folio, writeback results in the folio being cleaned and then the caller, via the GUP interface, writes to the folio again. As a result of the use of this secondary, direct, mapping to the folio no write notify will occur, and if the caller does mark the folio dirty, this will be done so unexpectedly. For example, consider the following scenario:- 1. A folio is written to via GUP which write-faults the memory, notifying the file system and dirtying the folio. 2. Later, writeback is triggered, resulting in the folio being cleaned and the PTE being marked read-only. 3. The GUP caller writes to the folio, as it is mapped read/write via the direct mapping. 4. The GUP caller, now done with the page, unpins it and sets it dirty (though it does not have to). This change updates both the PUP FOLL_LONGTERM slow and fast APIs. 
As pin_user_pages_fast_only() does not exist, we can rely on a slightly imperfect whitelisting in the PUP-fast case and fall back to the slow case should this fail. This patch (of 3): vma_wants_writenotify() is specifically intended for setting PTE page table flags, accounting for existing page table flag state and whether the underlying filesystem performs dirty tracking for a file-backed mapping. Everything is predicated firstly on whether the mapping is shared writable, as this is the only instance where dirty tracking is pertinent - MAP_PRIVATE mappings will always be CoW'd and unshared, and read-only file-backed shared mappings cannot be written to, even with FOLL_FORCE. All other checks are in line with existing logic, though now separated into checks eplicitily for dirty tracking and those for determining how to set page table flags. We make this change so we can perform checks in the GUP logic to determine which mappings might be problematic when written to. Link: https://lkml.kernel.org/r/cover.1683235180.git.lstoakes@gmail.com Link: https://lkml.kernel.org/r/0f218370bd49b4e6bbfbb499f7c7b92c26ba1ceb.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Reviewed-by: John Hubbard Reviewed-by: Mika Penttilä Reviewed-by: Jan Kara Reviewed-by: Jason Gunthorpe Acked-by: David Hildenbrand Cc: Kirill A . Shutemov Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 + mm/mmap.c | 58 +++++++++++++++++++++++++++++++++++++++++++----------- 2 files changed, 47 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 62bb3272e531..66032f0d515c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2461,6 +2461,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ MM_CP_UFFD_WP_RESOLVE) +bool vma_needs_dirty_tracking(struct vm_area_struct *vma); int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) { diff --git a/mm/mmap.c b/mm/mmap.c index e1624cb2c04e..f084b7940431 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1454,6 +1454,48 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) } #endif /* __ARCH_WANT_SYS_OLD_MMAP */ +static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) +{ + return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); +} + +static bool vma_is_shared_writable(struct vm_area_struct *vma) +{ + return (vma->vm_flags & (VM_WRITE | VM_SHARED)) == + (VM_WRITE | VM_SHARED); +} + +static bool vma_fs_can_writeback(struct vm_area_struct *vma) +{ + /* No managed pages to writeback. */ + if (vma->vm_flags & VM_PFNMAP) + return false; + + return vma->vm_file && vma->vm_file->f_mapping && + mapping_can_writeback(vma->vm_file->f_mapping); +} + +/* + * Does this VMA require the underlying folios to have their dirty state + * tracked? + */ +bool vma_needs_dirty_tracking(struct vm_area_struct *vma) +{ + /* Only shared, writable VMAs require dirty tracking. */ + if (!vma_is_shared_writable(vma)) + return false; + + /* Does the filesystem need to be notified? */ + if (vm_ops_needs_writenotify(vma->vm_ops)) + return true; + + /* + * Even if the filesystem doesn't indicate a need for writenotify, if it + * can writeback, dirty tracking is still required. + */ + return vma_fs_can_writeback(vma); +} + /* * Some shared mappings will want the pages marked read-only * to track write events. 
If so, we'll downgrade vm_page_prot @@ -1462,21 +1504,18 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) */ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) { - vm_flags_t vm_flags = vma->vm_flags; - const struct vm_operations_struct *vm_ops = vma->vm_ops; - /* If it was private or non-writable, the write bit is already clear */ - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) + if (!vma_is_shared_writable(vma)) return 0; /* The backer wishes to know when pages are first written to? */ - if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite)) + if (vm_ops_needs_writenotify(vma->vm_ops)) return 1; /* The open routine did something to the protections that pgprot_modify * won't preserve? */ if (pgprot_val(vm_page_prot) != - pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags))) + pgprot_val(vm_pgprot_modify(vm_page_prot, vma->vm_flags))) return 0; /* @@ -1490,13 +1529,8 @@ int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot) if (userfaultfd_wp(vma)) return 1; - /* Specialty mapping? */ - if (vm_flags & VM_PFNMAP) - return 0; - /* Can the mapping track the dirty pages? */ - return vma->vm_file && vma->vm_file->f_mapping && - mapping_can_writeback(vma->vm_file->f_mapping); + return vma_fs_can_writeback(vma); } /* -- cgit v1.2.3 From 8ac268436e6d52f3bb4d212df9395aec97afaa00 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:52 +0100 Subject: mm/gup: disallow FOLL_LONGTERM GUP-nonfast writing to file-backed mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writing to file-backed mappings which require folio dirty tracking using GUP is a fundamentally broken operation, as kernel write access to GUP mappings do not adhere to the semantics expected by a file system. A GUP caller uses the direct mapping to access the folio, which does not cause write notify to trigger, nor does it enforce that the caller marks the folio dirty. The problem arises when, after an initial write to the folio, writeback results in the folio being cleaned and then the caller, via the GUP interface, writes to the folio again. As a result of the use of this secondary, direct, mapping to the folio no write notify will occur, and if the caller does mark the folio dirty, this will be done so unexpectedly. For example, consider the following scenario:- 1. A folio is written to via GUP which write-faults the memory, notifying the file system and dirtying the folio. 2. Later, writeback is triggered, resulting in the folio being cleaned and the PTE being marked read-only. 3. The GUP caller writes to the folio, as it is mapped read/write via the direct mapping. 4. The GUP caller, now done with the page, unpins it and sets it dirty (though it does not have to). This results in both data being written to a folio without writenotify, and the folio being dirtied unexpectedly (if the caller decides to do so). This issue was first reported by Jan Kara [1] in 2018, where the problem resulted in file system crashes. This is only relevant when the mappings are file-backed and the underlying file system requires folio dirty tracking. File systems which do not, such as shmem or hugetlb, are not at risk and therefore can be written to without issue. Unfortunately this limitation of GUP has been present for some time and requires future rework of the GUP API in order to provide correct write access to such mappings. 
However, for the time being we introduce this check to prevent the most egregious case of this occurring, use of the FOLL_LONGTERM pin. These mappings are considerably more likely to be written to after folios are cleaned and thus simply must not be permitted to do so. This patch changes only the slow-path GUP functions, a following patch adapts the GUP-fast path along similar lines. [1] https://lore.kernel.org/linux-mm/20180103100430.GE4911@quack2.suse.cz/ Link: https://lkml.kernel.org/r/7282506742d2390c125949c2f9894722750bb68a.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: Jason Gunthorpe Reviewed-by: John Hubbard Reviewed-by: Mika Penttilä Reviewed-by: Jan Kara Reviewed-by: Jason Gunthorpe Acked-by: David Hildenbrand Cc: Kirill A . Shutemov Cc: Peter Zijlstra Signed-off-by: Andrew Morton --- mm/gup.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 43 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 8db58305f4eb..67999dd43d83 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -959,16 +959,54 @@ static int faultin_page(struct vm_area_struct *vma, return 0; } +/* + * Writing to file-backed mappings which require folio dirty tracking using GUP + * is a fundamentally broken operation, as kernel write access to GUP mappings + * do not adhere to the semantics expected by a file system. + * + * Consider the following scenario:- + * + * 1. A folio is written to via GUP which write-faults the memory, notifying + * the file system and dirtying the folio. + * 2. Later, writeback is triggered, resulting in the folio being cleaned and + * the PTE being marked read-only. + * 3. The GUP caller writes to the folio, as it is mapped read/write via the + * direct mapping. + * 4. The GUP caller, now done with the page, unpins it and sets it dirty + * (though it does not have to). + * + * This results in both data being written to a folio without writenotify, and + * the folio being dirtied unexpectedly (if the caller decides to do so). + */ +static bool writable_file_mapping_allowed(struct vm_area_struct *vma, + unsigned long gup_flags) +{ + /* + * If we aren't pinning then no problematic write can occur. A long term + * pin is the most egregious case so this is the case we disallow. + */ + if ((gup_flags & (FOLL_PIN | FOLL_LONGTERM)) != + (FOLL_PIN | FOLL_LONGTERM)) + return true; + + /* + * If the VMA does not require dirty tracking then no problematic write + * can occur either. 
+ */ + return !vma_needs_dirty_tracking(vma); +} + static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) { vm_flags_t vm_flags = vma->vm_flags; int write = (gup_flags & FOLL_WRITE); int foreign = (gup_flags & FOLL_REMOTE); + bool vma_anon = vma_is_anonymous(vma); if (vm_flags & (VM_IO | VM_PFNMAP)) return -EFAULT; - if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma)) + if ((gup_flags & FOLL_ANON) && !vma_anon) return -EFAULT; if ((gup_flags & FOLL_LONGTERM) && vma_is_fsdax(vma)) @@ -978,6 +1016,10 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags) return -EFAULT; if (write) { + if (!vma_anon && + !writable_file_mapping_allowed(vma, gup_flags)) + return -EFAULT; + if (!(vm_flags & VM_WRITE)) { if (!(gup_flags & FOLL_FORCE)) return -EFAULT; -- cgit v1.2.3 From a6e79df92e4a371ba7b751a20e3a31fde0f528b8 Mon Sep 17 00:00:00 2001 From: Lorenzo Stoakes Date: Thu, 4 May 2023 22:27:53 +0100 Subject: mm/gup: disallow FOLL_LONGTERM GUP-fast writing to file-backed mappings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Writing to file-backed dirty-tracked mappings via GUP is inherently broken as we cannot rule out folios being cleaned and then a GUP user writing to them again and possibly marking them dirty unexpectedly. This is especially egregious for long-term mappings (as indicated by the use of the FOLL_LONGTERM flag), so we disallow this case in GUP-fast as we have already done in the slow path. We have access to less information in the fast path as we cannot examine the VMA containing the mapping, however we can determine whether the folio is anonymous or belonging to a whitelisted filesystem - specifically hugetlb and shmem mappings. We take special care to ensure that both the folio and mapping are safe to access when performing these checks and document folio_fast_pin_allowed() accordingly. It's important to note that there are no APIs allowing users to specify FOLL_FAST_ONLY for a PUP-fast let alone with FOLL_LONGTERM, so we can always rely on the fact that if we fail to pin on the fast path, the code will fall back to the slow path which can perform the more thorough check. Link: https://lkml.kernel.org/r/a27d39b87ded7f3dad5fd4181edb106393660453.1683235180.git.lstoakes@gmail.com Signed-off-by: Lorenzo Stoakes Suggested-by: David Hildenbrand Suggested-by: Kirill A . Shutemov Suggested-by: Peter Zijlstra Reviewed-by: Jan Kara Acked-by: David Hildenbrand Cc: Jason Gunthorpe Cc: John Hubbard Cc: Mika Penttilä Signed-off-by: Andrew Morton --- mm/gup.c | 101 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 67999dd43d83..a718b956edbe 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -2351,6 +2352,82 @@ EXPORT_SYMBOL(get_user_pages_unlocked); */ #ifdef CONFIG_HAVE_FAST_GUP +/* + * Used in the GUP-fast path to determine whether a pin is permitted for a + * specific folio. + * + * This call assumes the caller has pinned the folio, that the lowest page table + * level still points to this folio, and that interrupts have been disabled. + * + * Writing to pinned file-backed dirty tracked folios is inherently problematic + * (see comment describing the writable_file_mapping_allowed() function). We + * therefore try to avoid the most egregious case of a long-term mapping doing + * so. 
+ * + * This function cannot be as thorough as that one as the VMA is not available + * in the fast path, so instead we whitelist known good cases and if in doubt, + * fall back to the slow path. + */ +static bool folio_fast_pin_allowed(struct folio *folio, unsigned int flags) +{ + struct address_space *mapping; + unsigned long mapping_flags; + + /* + * If we aren't pinning then no problematic write can occur. A long term + * pin is the most egregious case so this is the one we disallow. + */ + if ((flags & (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) != + (FOLL_PIN | FOLL_LONGTERM | FOLL_WRITE)) + return true; + + /* The folio is pinned, so we can safely access folio fields. */ + + if (WARN_ON_ONCE(folio_test_slab(folio))) + return false; + + /* hugetlb mappings do not require dirty-tracking. */ + if (folio_test_hugetlb(folio)) + return true; + + /* + * GUP-fast disables IRQs. When IRQS are disabled, RCU grace periods + * cannot proceed, which means no actions performed under RCU can + * proceed either. + * + * inodes and thus their mappings are freed under RCU, which means the + * mapping cannot be freed beneath us and thus we can safely dereference + * it. + */ + lockdep_assert_irqs_disabled(); + + /* + * However, there may be operations which _alter_ the mapping, so ensure + * we read it once and only once. + */ + mapping = READ_ONCE(folio->mapping); + + /* + * The mapping may have been truncated, in any case we cannot determine + * if this mapping is safe - fall back to slow path to determine how to + * proceed. + */ + if (!mapping) + return false; + + /* Anonymous folios pose no problem. */ + mapping_flags = (unsigned long)mapping & PAGE_MAPPING_FLAGS; + if (mapping_flags) + return mapping_flags & PAGE_MAPPING_ANON; + + /* + * At this point, we know the mapping is non-null and points to an + * address_space object. The only remaining whitelisted file system is + * shmem. 
+ */ + return shmem_mapping(mapping); +} + static void __maybe_unused undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages) @@ -2436,6 +2513,11 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, goto pte_unmap; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, 1, flags); + goto pte_unmap; + } + if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) { gup_put_folio(folio, 1, flags); goto pte_unmap; @@ -2628,6 +2710,11 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2694,6 +2781,10 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2734,6 +2825,11 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) { gup_put_folio(folio, refs, flags); return 0; @@ -2774,6 +2870,11 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 0; } + if (!folio_fast_pin_allowed(folio, flags)) { + gup_put_folio(folio, refs, flags); + return 0; + } + *nr += refs; folio_set_referenced(folio); return 1; -- cgit v1.2.3 From ce5df7764b3b2abaf3687c460a9a1922daaed5b7 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Fri, 19 May 2023 13:16:52 +0200 Subject: mm: page_isolation: write proper kerneldoc And remove the incorrect header comments. [akpm@linux-foundation.org: s/lower/first/, s/upper/last/, per Mike] Link: https://lkml.kernel.org/r/20230519111652.40658-1-hannes@cmpxchg.org Signed-off-by: Johannes Weiner Cc: Mike Rapoport Signed-off-by: Andrew Morton --- include/linux/page-isolation.h | 24 ++++++------------------ mm/page_isolation.c | 33 ++++++++++++++++++++++++++------- 2 files changed, 32 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 5456b7be38ae..0ab089e89db4 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -37,24 +37,12 @@ void set_pageblock_migratetype(struct page *page, int migratetype); int move_freepages_block(struct zone *zone, struct page *page, int migratetype, int *num_movable); -/* - * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. - */ -int -start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags, gfp_t gfp_flags); - -/* - * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. - * target range is [start_pfn, end_pfn) - */ -void -undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype); - -/* - * Test all pages in [start_pfn, end_pfn) are isolated or not. 
- */ +int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + int migratetype, int flags, gfp_t gfp_flags); + +void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, + int migratetype); + int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int isol_flags); diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c6f3605e37ab..6599cc965e21 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -481,10 +481,9 @@ failed: } /** - * start_isolate_page_range() - make page-allocation-type of range of pages to - * be MIGRATE_ISOLATE. - * @start_pfn: The lower PFN of the range to be isolated. - * @end_pfn: The upper PFN of the range to be isolated. + * start_isolate_page_range() - mark page range MIGRATE_ISOLATE + * @start_pfn: The first PFN of the range to be isolated. + * @end_pfn: The last PFN of the range to be isolated. * @migratetype: Migrate type to set in error recovery. * @flags: The following flags are allowed (they can be combined in * a bit mask) @@ -571,8 +570,14 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, return 0; } -/* - * Make isolated pages available again. +/** + * undo_isolate_page_range - undo effects of start_isolate_page_range() + * @start_pfn: The first PFN of the isolated range + * @end_pfn: The last PFN of the isolated range + * @migratetype: New migrate type to set on the range + * + * This finds every MIGRATE_ISOLATE page block in the given range + * and switches it to @migratetype. */ void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, int migratetype) @@ -631,7 +636,21 @@ __test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn, return pfn; } -/* Caller should ensure that requested range is in a single zone */ +/** + * test_pages_isolated - check if pageblocks in range are isolated + * @start_pfn: The first PFN of the isolated range + * @end_pfn: The first PFN *after* the isolated range + * @isol_flags: Testing mode flags + * + * This tests if all in the specified range are free. + * + * If %MEMORY_OFFLINE is specified in @flags, it will consider + * poisoned and offlined pages free as well. + * + * Caller must ensure the requested range doesn't span zones. + * + * Returns 0 if true, -EBUSY if one or more pages are in use. + */ int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, int isol_flags) { -- cgit v1.2.3 From 5da226dbfce3a2f44978c2c7cf88166e69a6788b Mon Sep 17 00:00:00 2001 From: Zhaoyang Huang Date: Wed, 31 May 2023 10:51:01 +0800 Subject: mm: skip CMA pages when they are not available This patch fixes unproductive reclaiming of CMA pages by skipping them when they are not available for current context. It arises from the below OOM issue, which was caused by a large proportion of MIGRATE_CMA pages among free pages. [ 36.172486] [03-19 10:05:52.172] ActivityManager: page allocation failure: order:0, mode:0xc00(GFP_NOIO), nodemask=(null),cpuset=foreground,mems_allowed=0 [ 36.189447] [03-19 10:05:52.189] DMA32: 0*4kB 447*8kB (C) 217*16kB (C) 124*32kB (C) 136*64kB (C) 70*128kB (C) 22*256kB (C) 3*512kB (C) 0*1024kB 0*2048kB 0*4096kB = 35848kB [ 36.193125] [03-19 10:05:52.193] Normal: 231*4kB (UMEH) 49*8kB (MEH) 14*16kB (H) 13*32kB (H) 8*64kB (H) 2*128kB (H) 0*256kB 1*512kB (H) 0*1024kB 0*2048kB 0*4096kB = 3236kB ... 
[ 36.234447] [03-19 10:05:52.234] SLUB: Unable to allocate memory on node -1, gfp=0xa20(GFP_ATOMIC) [ 36.234455] [03-19 10:05:52.234] cache: ext4_io_end, object size: 64, buffer size: 64, default order: 0, min order: 0 [ 36.234459] [03-19 10:05:52.234] node 0: slabs: 53,objs: 3392, free: 0 This change further decreases the chance for wrong OOMs in the presence of a lot of CMA memory. [david@redhat.com: changelog addition] Link: https://lkml.kernel.org/r/1685501461-19290-1-git-send-email-zhaoyang.huang@unisoc.com Signed-off-by: Zhaoyang Huang Acked-by: David Hildenbrand Cc: ke.wang Cc: Matthew Wilcox Cc: Minchan Kim Cc: Suren Baghdasaryan Signed-off-by: Andrew Morton --- mm/vmscan.c | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5993c0bae162..9048739c41db 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2271,6 +2271,25 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec, } +#ifdef CONFIG_CMA +/* + * It is waste of effort to scan and reclaim CMA pages if it is not available + * for current allocation context. Kswapd can not be enrolled as it can not + * distinguish this scenario by using sc->gfp_mask = GFP_KERNEL + */ +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return !current_is_kswapd() && + gfp_migratetype(sc->gfp_mask) != MIGRATE_MOVABLE && + get_pageblock_migratetype(&folio->page) == MIGRATE_CMA; +} +#else +static bool skip_cma(struct folio *folio, struct scan_control *sc) +{ + return false; +} +#endif + /* * Isolating page from the lruvec to fill in @dst list by nr_to_scan times. * @@ -2317,7 +2336,8 @@ static unsigned long isolate_lru_folios(unsigned long nr_to_scan, nr_pages = folio_nr_pages(folio); total_scan += nr_pages; - if (folio_zonenum(folio) > sc->reclaim_idx) { + if (folio_zonenum(folio) > sc->reclaim_idx || + skip_cma(folio, sc)) { nr_skipped[folio_zonenum(folio)] += nr_pages; move_to = &folios_skipped; goto move; -- cgit v1.2.3 From e5797dc011182f8b25420bc977f37cd92fc6e755 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 6 Jun 2023 20:18:13 +0800 Subject: mm: vmscan: mark kswapd_run() and kswapd_stop() __meminit Add __meminit to kswapd_run() and kswapd_stop() to ensure they're default to __init when memory hotplug is not enabled. Link: https://lkml.kernel.org/r/20230606121813.242163-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Acked-by: Yu Zhao Acked-by: David Hildenbrand Signed-off-by: Andrew Morton --- include/linux/swap.h | 4 ++-- mm/vmscan.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index 2ddbfd85f6c7..b5f6f2916de1 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -460,8 +460,8 @@ static inline bool node_reclaim_enabled(void) void check_move_unevictable_folios(struct folio_batch *fbatch); void check_move_unevictable_pages(struct pagevec *pvec); -extern void kswapd_run(int nid); -extern void kswapd_stop(int nid); +extern void __meminit kswapd_run(int nid); +extern void __meminit kswapd_stop(int nid); #ifdef CONFIG_SWAP diff --git a/mm/vmscan.c b/mm/vmscan.c index 9048739c41db..a4e2936f6d35 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -7871,7 +7871,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim) /* * This kswapd start function will be called by init and node-hot-add. 
*/ -void kswapd_run(int nid) +void __meminit kswapd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); @@ -7892,7 +7892,7 @@ void kswapd_run(int nid) * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ -void kswapd_stop(int nid) +void __meminit kswapd_stop(int nid) { pg_data_t *pgdat = NODE_DATA(nid); struct task_struct *kswapd; -- cgit v1.2.3 From e6c715abb4ee0ffe291c83e2e3d22866f387cda9 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Tue, 6 Jun 2023 20:07:24 +0800 Subject: memory tier: remove unneeded disable_all_demotion_targets() when !CONFIG_MIGRATION There's no caller of disable_all_demotion_targets() when CONFIG_MIGRATION is disabled. Remove it. Link: https://lkml.kernel.org/r/20230606120724.208552-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton --- mm/memory-tiers.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c index e593e56e530b..dd04f0ce5277 100644 --- a/mm/memory-tiers.c +++ b/mm/memory-tiers.c @@ -451,7 +451,6 @@ static void establish_demotion_targets(void) } #else -static inline void disable_all_demotion_targets(void) {} static inline void establish_demotion_targets(void) {} #endif /* CONFIG_MIGRATION */ -- cgit v1.2.3 From ad27ce206af731f6854b3d8a1760c573b217e363 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:11 +0800 Subject: mm/hugetlb: use a folio in copy_hugetlb_page_range() Patch series "Convert several functions in hugetlb.c to use a folio", v2. This patch series converts three functions in hugetlb.c to use a folio, which can remove several implicit calls to compound_head(). This patch (of 3): We can replace five implict calls to compound_head() with one by using pte_folio. The page we get back is always a head page, so we just convert ptepage to pte_folio. Link: https://lkml.kernel.org/r/20230606062013.2947002-1-zhangpeng362@huawei.com Link: https://lkml.kernel.org/r/20230606062013.2947002-2-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Suggested-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Reviewed-by: Sidhartha Kumar Reviewed-by: Matthew Wilcox (Oracle) Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index ea24718db4af..d6f6d19958a5 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5016,7 +5016,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, struct vm_area_struct *src_vma) { pte_t *src_pte, *dst_pte, entry; - struct page *ptepage; + struct folio *pte_folio; unsigned long addr; bool cow = is_cow_mapping(src_vma->vm_flags); struct hstate *h = hstate_vma(src_vma); @@ -5115,8 +5115,8 @@ again: set_huge_pte_at(dst, addr, dst_pte, entry); } else { entry = huge_ptep_get(src_pte); - ptepage = pte_page(entry); - get_page(ptepage); + pte_folio = page_folio(pte_page(entry)); + folio_get(pte_folio); /* * Failing to duplicate the anon rmap is a rare case @@ -5128,10 +5128,10 @@ again: * need to be without the pgtable locks since we could * sleep during the process. 
*/ - if (!PageAnon(ptepage)) { - page_dup_file_rmap(ptepage, true); - } else if (page_try_dup_anon_rmap(ptepage, true, - src_vma)) { + if (!folio_test_anon(pte_folio)) { + page_dup_file_rmap(&pte_folio->page, true); + } else if (page_try_dup_anon_rmap(&pte_folio->page, + true, src_vma)) { pte_t src_pte_old = entry; struct folio *new_folio; @@ -5140,14 +5140,14 @@ again: /* Do not use reserve as it's private owned */ new_folio = alloc_hugetlb_folio(dst_vma, addr, 1); if (IS_ERR(new_folio)) { - put_page(ptepage); + folio_put(pte_folio); ret = PTR_ERR(new_folio); break; } ret = copy_user_large_folio(new_folio, - page_folio(ptepage), - addr, dst_vma); - put_page(ptepage); + pte_folio, + addr, dst_vma); + folio_put(pte_folio); if (ret) { folio_put(new_folio); break; -- cgit v1.2.3 From 959a78b6dd4526fb11d3cacf2de909479b06a4f4 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:12 +0800 Subject: mm/hugetlb: use a folio in hugetlb_wp() We can replace nine implict calls to compound_head() with one by using old_folio. The page we get back is always a head page, so we just convert old_page to old_folio. Link: https://lkml.kernel.org/r/20230606062013.2947002-3-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Suggested-by: Matthew Wilcox (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Reviewed-by: Sidhartha Kumar Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d6f6d19958a5..e58f8001fd92 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5540,7 +5540,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, const bool unshare = flags & FAULT_FLAG_UNSHARE; pte_t pte = huge_ptep_get(ptep); struct hstate *h = hstate_vma(vma); - struct page *old_page; + struct folio *old_folio; struct folio *new_folio; int outside_reserve = 0; vm_fault_t ret = 0; @@ -5571,7 +5571,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma, return 0; } - old_page = pte_page(pte); + old_folio = page_folio(pte_page(pte)); delayacct_wpcopy_start(); @@ -5580,17 +5580,17 @@ retry_avoidcopy: * If no-one else is actually using this page, we're the exclusive * owner and can reuse this page. */ - if (page_mapcount(old_page) == 1 && PageAnon(old_page)) { - if (!PageAnonExclusive(old_page)) - page_move_anon_rmap(old_page, vma); + if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) { + if (!PageAnonExclusive(&old_folio->page)) + page_move_anon_rmap(&old_folio->page, vma); if (likely(!unshare)) set_huge_ptep_writable(vma, haddr, ptep); delayacct_wpcopy_end(); return 0; } - VM_BUG_ON_PAGE(PageAnon(old_page) && PageAnonExclusive(old_page), - old_page); + VM_BUG_ON_PAGE(folio_test_anon(old_folio) && + PageAnonExclusive(&old_folio->page), &old_folio->page); /* * If the process that created a MAP_PRIVATE mapping is about to @@ -5602,10 +5602,10 @@ retry_avoidcopy: * of the full address range. */ if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && - page_folio(old_page) != pagecache_folio) + old_folio != pagecache_folio) outside_reserve = 1; - get_page(old_page); + folio_get(old_folio); /* * Drop page table lock as buddy allocator may be called. It will @@ -5627,7 +5627,7 @@ retry_avoidcopy: pgoff_t idx; u32 hash; - put_page(old_page); + folio_put(old_folio); /* * Drop hugetlb_fault_mutex and vma_lock before * unmapping. 
unmapping needs to hold vma_lock @@ -5642,7 +5642,7 @@ retry_avoidcopy: hugetlb_vma_unlock_read(vma); mutex_unlock(&hugetlb_fault_mutex_table[hash]); - unmap_ref_private(mm, vma, old_page, haddr); + unmap_ref_private(mm, vma, &old_folio->page, haddr); mutex_lock(&hugetlb_fault_mutex_table[hash]); hugetlb_vma_lock_read(vma); @@ -5672,7 +5672,7 @@ retry_avoidcopy: goto out_release_all; } - if (copy_user_large_folio(new_folio, page_folio(old_page), address, vma)) { + if (copy_user_large_folio(new_folio, old_folio, address, vma)) { ret = VM_FAULT_HWPOISON_LARGE; goto out_release_all; } @@ -5694,14 +5694,14 @@ retry_avoidcopy: /* Break COW or unshare */ huge_ptep_clear_flush(vma, haddr, ptep); mmu_notifier_invalidate_range(mm, range.start, range.end); - page_remove_rmap(old_page, vma, true); + page_remove_rmap(&old_folio->page, vma, true); hugepage_add_new_anon_rmap(new_folio, vma, haddr); if (huge_pte_uffd_wp(pte)) newpte = huge_pte_mkuffd_wp(newpte); set_huge_pte_at(mm, haddr, ptep, newpte); folio_set_hugetlb_migratable(new_folio); /* Make the old page be freed below */ - new_folio = page_folio(old_page); + new_folio = old_folio; } spin_unlock(ptl); mmu_notifier_invalidate_range_end(&range); @@ -5710,11 +5710,11 @@ out_release_all: * No restore in case of successful pagetable update (Break COW or * unshare) */ - if (new_folio != page_folio(old_page)) + if (new_folio != old_folio) restore_reserve_on_error(h, vma, haddr, new_folio); folio_put(new_folio); out_release_old: - put_page(old_page); + folio_put(old_folio); spin_lock(ptl); /* Caller expects lock to be held */ -- cgit v1.2.3 From 061e62e8180d3fab378a52d868e29ceebe2fe1d2 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Tue, 6 Jun 2023 14:20:13 +0800 Subject: mm/hugetlb: use a folio in hugetlb_fault() We can replace seven implicit calls to compound_head() with one by using folio. [akpm@linux-foundation.org: update comment, per Sidhartha] Link: https://lkml.kernel.org/r/20230606062013.2947002-4-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Reviewed-by Sidhartha Kumar Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Muchun Song Cc: Kefeng Wang Cc: Mike Kravetz Cc: Nanyong Sun Cc: Vishal Moola (Oracle) Signed-off-by: Andrew Morton --- mm/hugetlb.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index e58f8001fd92..dfa412d8cb30 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -6062,7 +6062,7 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, vm_fault_t ret; u32 hash; pgoff_t idx; - struct page *page = NULL; + struct folio *folio = NULL; struct folio *pagecache_folio = NULL; struct hstate *h = hstate_vma(vma); struct address_space *mapping; @@ -6179,16 +6179,16 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, /* * hugetlb_wp() requires page locks of pte_page(entry) and * pagecache_folio, so here we need take the former one - * when page != pagecache_folio or !pagecache_folio. + * when folio != pagecache_folio or !pagecache_folio. 
*/ - page = pte_page(entry); - if (page_folio(page) != pagecache_folio) - if (!trylock_page(page)) { + folio = page_folio(pte_page(entry)); + if (folio != pagecache_folio) + if (!folio_trylock(folio)) { need_wait_lock = 1; goto out_ptl; } - get_page(page); + folio_get(folio); if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) { if (!huge_pte_write(entry)) { @@ -6204,9 +6204,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, flags & FAULT_FLAG_WRITE)) update_mmu_cache(vma, haddr, ptep); out_put_page: - if (page_folio(page) != pagecache_folio) - unlock_page(page); - put_page(page); + if (folio != pagecache_folio) + folio_unlock(folio); + folio_put(folio); out_ptl: spin_unlock(ptl); @@ -6225,7 +6225,7 @@ out_mutex: * here without taking refcount. */ if (need_wait_lock) - wait_on_page_locked(page); + folio_wait_locked(folio); return ret; } -- cgit v1.2.3 From bd5f79ab39367665f40e10c2486aa15e7a841490 Mon Sep 17 00:00:00 2001 From: Yajun Deng Date: Wed, 7 Jun 2023 10:39:52 +0800 Subject: mm/sparse: remove unused parameters in sparse_remove_section() These parameters ms and map_offset are not used in sparse_remove_section(), so remove them. The __remove_section() is only called by __remove_pages(), remove it. And put the WARN_ON_ONCE() in sparse_remove_section(). Link: https://lkml.kernel.org/r/20230607023952.2247489-1-yajun.deng@linux.dev Signed-off-by: Yajun Deng Reviewed-by: David Hildenbrand Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/memory_hotplug.h | 5 ++--- mm/memory_hotplug.c | 18 +----------------- mm/sparse.c | 10 +++++++--- 3 files changed, 10 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 04bc286eed42..013c69753c91 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -344,9 +344,8 @@ extern void remove_pfn_range_from_zone(struct zone *zone, extern int sparse_add_section(int nid, unsigned long pfn, unsigned long nr_pages, struct vmem_altmap *altmap, struct dev_pagemap *pgmap); -extern void sparse_remove_section(struct mem_section *ms, - unsigned long pfn, unsigned long nr_pages, - unsigned long map_offset, struct vmem_altmap *altmap); +extern void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap); extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum); extern struct zone *zone_for_pfn_range(int online_type, int nid, diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 9061ac69b1b6..8877734b5f2f 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -492,18 +492,6 @@ void __ref remove_pfn_range_from_zone(struct zone *zone, set_zone_contiguous(zone); } -static void __remove_section(unsigned long pfn, unsigned long nr_pages, - unsigned long map_offset, - struct vmem_altmap *altmap) -{ - struct mem_section *ms = __pfn_to_section(pfn); - - if (WARN_ON_ONCE(!valid_section(ms))) - return; - - sparse_remove_section(ms, pfn, nr_pages, map_offset, altmap); -} - /** * __remove_pages() - remove sections of pages * @pfn: starting pageframe (must be aligned to start of a section) @@ -520,9 +508,6 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, { const unsigned long end_pfn = pfn + nr_pages; unsigned long cur_nr_pages; - unsigned long map_offset = 0; - - map_offset = vmem_altmap_offset(altmap); if (check_pfn_span(pfn, nr_pages)) { WARN(1, "Misaligned %s start: %#lx end: %#lx\n", __func__, pfn, pfn + nr_pages - 1); @@ -534,8 
+519,7 @@ void __remove_pages(unsigned long pfn, unsigned long nr_pages, /* Select all remaining pages up to the next section boundary */ cur_nr_pages = min(end_pfn - pfn, SECTION_ALIGN_UP(pfn + 1) - pfn); - __remove_section(pfn, cur_nr_pages, map_offset, altmap); - map_offset = 0; + sparse_remove_section(pfn, cur_nr_pages, altmap); } } diff --git a/mm/sparse.c b/mm/sparse.c index c2afdb26039e..7a29e10193fe 100644 --- a/mm/sparse.c +++ b/mm/sparse.c @@ -922,10 +922,14 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn, return 0; } -void sparse_remove_section(struct mem_section *ms, unsigned long pfn, - unsigned long nr_pages, unsigned long map_offset, - struct vmem_altmap *altmap) +void sparse_remove_section(unsigned long pfn, unsigned long nr_pages, + struct vmem_altmap *altmap) { + struct mem_section *ms = __pfn_to_section(pfn); + + if (WARN_ON_ONCE(!valid_section(ms))) + return; + section_deactivate(pfn, nr_pages, altmap); } #endif /* CONFIG_MEMORY_HOTPLUG */ -- cgit v1.2.3 From 91ff4d754a1895feb4216e94028edd76cbbc0770 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 03:24:02 +0000 Subject: mm/mm_init.c: drop 'nid' parameter from check_for_memory() The node_id in pgdat has already been set in free_area_init_node(), so use it internally instead of passing a redundant parameter. Link: https://lkml.kernel.org/r/20230607032402.4679-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Reviewed-by: Mike Rapoport (IBM) Acked-by: Michal Hocko Signed-off-by: Andrew Morton --- mm/mm_init.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/mm_init.c b/mm/mm_init.c index 015355dfdc0b..25a585cb0c0a 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1724,7 +1724,7 @@ static void __init free_area_init_node(int nid) } /* Any regular or high memory on that node ? */ -static void check_for_memory(pg_data_t *pgdat, int nid) +static void check_for_memory(pg_data_t *pgdat) { enum zone_type zone_type; @@ -1732,9 +1732,9 @@ static void check_for_memory(pg_data_t *pgdat, int nid) struct zone *zone = &pgdat->node_zones[zone_type]; if (populated_zone(zone)) { if (IS_ENABLED(CONFIG_HIGHMEM)) - node_set_state(nid, N_HIGH_MEMORY); + node_set_state(pgdat->node_id, N_HIGH_MEMORY); if (zone_type <= ZONE_NORMAL) - node_set_state(nid, N_NORMAL_MEMORY); + node_set_state(pgdat->node_id, N_NORMAL_MEMORY); break; } } @@ -1886,7 +1886,7 @@ void __init free_area_init(unsigned long *max_zone_pfn) /* Any memory on that node */ if (pgdat->node_present_pages) node_set_state(nid, N_MEMORY); - check_for_memory(pgdat, nid); + check_for_memory(pgdat); } memmap_init(); -- cgit v1.2.3 From 36ce9d76b0a93bae799e27e4f5ac35478c676592 Mon Sep 17 00:00:00 2001 From: Roberto Sassu Date: Wed, 7 Jun 2023 18:15:23 +0200 Subject: shmem: use ramfs_kill_sb() for kill_sb method of ramfs-based tmpfs As the ramfs-based tmpfs uses ramfs_init_fs_context() for the init_fs_context method, which allocates fc->s_fs_info, use ramfs_kill_sb() to free it and avoid a memory leak. 
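As a rough sketch of the pairing this change restores (illustrative only, not part of the patch; the helper name example_kill_sb is made up): a filesystem whose init_fs_context allocates s_fs_info must also free it in its kill_sb, because kill_litter_super() only tears down the dentry tree and the superblock, not the fs-private data.

#include <linux/fs.h>
#include <linux/slab.h>

/* Sketch: pair the allocation done in init_fs_context with the teardown. */
static void example_kill_sb(struct super_block *sb)
{
	kfree(sb->s_fs_info);      /* free what init_fs_context allocated */
	kill_litter_super(sb);     /* then drop dentries and the superblock */
}

Pointing the tmpfs .kill_sb at ramfs_kill_sb(), as the diff below does, follows exactly this pattern.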
Link: https://lkml.kernel.org/r/20230607161523.2876433-1-roberto.sassu@huaweicloud.com Fixes: c3b1b1cbf002 ("ramfs: add support for "mode=" mount option") Signed-off-by: Roberto Sassu Cc: Hugh Dickins Cc: David Howells Cc: Al Viro Cc: Signed-off-by: Andrew Morton --- fs/ramfs/inode.c | 2 +- include/linux/ramfs.h | 1 + mm/shmem.c | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/fs/ramfs/inode.c b/fs/ramfs/inode.c index 5ba580c78835..fef477c78107 100644 --- a/fs/ramfs/inode.c +++ b/fs/ramfs/inode.c @@ -278,7 +278,7 @@ int ramfs_init_fs_context(struct fs_context *fc) return 0; } -static void ramfs_kill_sb(struct super_block *sb) +void ramfs_kill_sb(struct super_block *sb) { kfree(sb->s_fs_info); kill_litter_super(sb); diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h index 917528d102c4..d506dc63dd47 100644 --- a/include/linux/ramfs.h +++ b/include/linux/ramfs.h @@ -7,6 +7,7 @@ struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir, umode_t mode, dev_t dev); extern int ramfs_init_fs_context(struct fs_context *fc); +extern void ramfs_kill_sb(struct super_block *sb); #ifdef CONFIG_MMU static inline int diff --git a/mm/shmem.c b/mm/shmem.c index 5e54ab5f61f2..c606ab89693a 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -4199,7 +4199,7 @@ static struct file_system_type shmem_fs_type = { .name = "tmpfs", .init_fs_context = ramfs_init_fs_context, .parameters = ramfs_fs_parameters, - .kill_sb = kill_litter_super, + .kill_sb = ramfs_kill_sb, .fs_flags = FS_USERNS_MOUNT, }; -- cgit v1.2.3 From a668968f84265e698a122656c433809ab9f023fa Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 02:45:48 +0000 Subject: mm/memory_hotplug: remove reset_node_managed_pages() in hotadd_init_pgdat() Managed pages have already been set to 0 in free_area_init_core_hotplug(), via zone_init_internals() on each zone. It's pointless to reset them again. Furthermore, reset_node_managed_pages() no longer needs to be exposed outside of mm/memblock.c. Remove the declaration in include/linux/memblock.h and define it as static. In addition to this, the only caller of reset_node_managed_pages() is reset_all_zones_managed_pages(), which is annotated with __init, so it should be safe to also mark reset_node_managed_pages() as __init.
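For illustration, this is the shape the helper takes once it is file-local (a sketch; the body is assumed from the memblock code of that era and may not match exactly, since only the one-line signature change appears in the diff below): static confines the symbol to mm/memblock.c, and __init lets its text be discarded after boot, which is safe because its sole caller, reset_all_zones_managed_pages(), is itself __init.

/* Assumed resulting shape, for illustration only. */
static void __init reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	/* Zero the managed-page count of every zone on this node. */
	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		atomic_long_set(&z->managed_pages, 0);
}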
Link: https://lkml.kernel.org/r/20230607024548.1240-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Suggested-by: David Hildenbrand Cc: Michal Hocko Cc: Mike Rapoport (IBM) Cc: Oscar Salvador Signed-off-by: Andrew Morton --- include/linux/memblock.h | 1 - mm/memblock.c | 2 +- mm/memory_hotplug.c | 1 - 3 files changed, 1 insertion(+), 3 deletions(-) (limited to 'mm') diff --git a/include/linux/memblock.h b/include/linux/memblock.h index f82ee3fac1cd..f71ff9f0ec81 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -128,7 +128,6 @@ int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); void memblock_free_all(void); void memblock_free(void *ptr, size_t size); -void reset_node_managed_pages(pg_data_t *pgdat); void reset_all_zones_managed_pages(void); /* Low level functions */ diff --git a/mm/memblock.c b/mm/memblock.c index 3feafea06ab2..da4264528e1e 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2122,7 +2122,7 @@ static unsigned long __init free_low_memory_core_early(void) static int reset_managed_pages_done __initdata; -void reset_node_managed_pages(pg_data_t *pgdat) +static void __init reset_node_managed_pages(pg_data_t *pgdat) { struct zone *z; diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 8877734b5f2f..5248323fc0f7 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1194,7 +1194,6 @@ static pg_data_t __ref *hotadd_init_pgdat(int nid) * online_pages() and offline_pages(). * TODO: should be in free_area_init_core_hotplug? */ - reset_node_managed_pages(pgdat); reset_node_present_pages(pgdat); return pgdat; -- cgit v1.2.3 From 32b6a4a1745a46918f748f6fb7641e588fbec6f2 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Wed, 7 Jun 2023 02:50:56 +0000 Subject: mm/mm_init.c: remove reset_node_present_pages() reset_node_present_pages() is only called from hotadd_init_pgdat(); move the clearing of present pages into free_area_init_core_hotplug(), so the helper can be removed.
- */ - reset_node_present_pages(pgdat); - return pgdat; } diff --git a/mm/mm_init.c b/mm/mm_init.c index 25a585cb0c0a..122e9bf3fa73 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -1509,6 +1509,8 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) pgdat->kswapd_order = 0; pgdat->kswapd_highest_zoneidx = 0; pgdat->node_start_pfn = 0; + pgdat->node_present_pages = 0; + for_each_online_cpu(cpu) { struct per_cpu_nodestat *p; @@ -1516,8 +1518,17 @@ void __ref free_area_init_core_hotplug(struct pglist_data *pgdat) memset(p, 0, sizeof(*p)); } - for (z = 0; z < MAX_NR_ZONES; z++) - zone_init_internals(&pgdat->node_zones[z], z, nid, 0); + /* + * When memory is hot-added, all the memory is in offline state. So + * clear all zones' present_pages and managed_pages because they will + * be updated in online_pages() and offline_pages(). + */ + for (z = 0; z < MAX_NR_ZONES; z++) { + struct zone *zone = pgdat->node_zones + z; + + zone->present_pages = 0; + zone_init_internals(zone, z, nid, 0); + } } #endif -- cgit v1.2.3 From b9c91c43412f2e07a5287dfe7027acdd8fb0b1ef Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 7 Jun 2023 19:51:43 +0000 Subject: mm: zswap: support exclusive loads Commit 71024cb4a0bf ("frontswap: remove frontswap_tmem_exclusive_gets") removed support for exclusive loads from frontswap as it was not used. Bring back exclusive loads support to frontswap by adding an "exclusive" output parameter to frontswap_ops->load. On the zswap side, add a module parameter to enable/disable exclusive loads, and a config option to control the boot default value. Refactor zswap entry invalidation in zswap_frontswap_invalidate_page() into zswap_invalidate_entry() to reuse it in zswap_frontswap_load() if exclusive loads are enabled. With exclusive loads, we avoid having two copies of the same page in memory (compressed & uncompressed) after faulting it in from zswap. On the other hand, if the page is to be reclaimed again without being dirtied, it will be re-compressed. Compression is not usually slow, and a page that was just faulted in is less likely to be reclaimed again soon. Link: https://lkml.kernel.org/r/20230607195143.1473802-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Suggested-by: Yu Zhao Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Domenico Cerasuolo Cc: Konrad Rzeszutek Wilk Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- include/linux/frontswap.h | 2 +- mm/Kconfig | 16 ++++++++++++++++ mm/frontswap.c | 10 ++++++++-- mm/zswap.c | 28 ++++++++++++++++++++-------- 4 files changed, 45 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index a631bac12220..eaa0ac5f9003 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h @@ -10,7 +10,7 @@ struct frontswap_ops { void (*init)(unsigned); /* this swap type was just swapon'ed */ int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ - int (*load)(unsigned, pgoff_t, struct page *); /* load a page */ + int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */ void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */ void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */ }; diff --git a/mm/Kconfig b/mm/Kconfig index 7672a22647b4..12f32f8d26bf 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -46,6 +46,22 @@ config ZSWAP_DEFAULT_ON The selection made here can be overridden by using the kernel command line 'zswap.enabled=' option. 
+config ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON + bool "Invalidate zswap entries when pages are loaded" + depends on ZSWAP + help + If selected, exclusive loads for zswap will be enabled at boot, + otherwise it will be disabled. + + If exclusive loads are enabled, when a page is loaded from zswap, + the zswap entry is invalidated at once, as opposed to leaving it + in zswap until the swap entry is freed. + + This avoids having two copies of the same page in memory + (compressed and uncompressed) after faulting in a page from zswap. + The cost is that if the page was never dirtied and needs to be + swapped out again, it will be re-compressed. + choice prompt "Default compressor" depends on ZSWAP diff --git a/mm/frontswap.c b/mm/frontswap.c index 279e55b4ed87..2fb5df3384b8 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -206,6 +206,7 @@ int __frontswap_load(struct page *page) int type = swp_type(entry); struct swap_info_struct *sis = swap_info[type]; pgoff_t offset = swp_offset(entry); + bool exclusive = false; VM_BUG_ON(!frontswap_ops); VM_BUG_ON(!PageLocked(page)); @@ -215,9 +216,14 @@ int __frontswap_load(struct page *page) return -1; /* Try loading from each implementation, until one succeeds. */ - ret = frontswap_ops->load(type, offset, page); - if (ret == 0) + ret = frontswap_ops->load(type, offset, page, &exclusive); + if (ret == 0) { inc_frontswap_loads(); + if (exclusive) { + SetPageDirty(page); + __frontswap_clear(sis, offset); + } + } return ret; } diff --git a/mm/zswap.c b/mm/zswap.c index bcb82e09eb64..9fa86265f6dd 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -138,6 +138,10 @@ static bool zswap_non_same_filled_pages_enabled = true; module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled, bool, 0644); +static bool zswap_exclusive_loads_enabled = IS_ENABLED( + CONFIG_ZSWAP_EXCLUSIVE_LOADS_DEFAULT_ON); +module_param_named(exclusive_loads, zswap_exclusive_loads_enabled, bool, 0644); + /********************************* * data structures **********************************/ @@ -1340,12 +1344,22 @@ shrink: goto reject; } +static void zswap_invalidate_entry(struct zswap_tree *tree, + struct zswap_entry *entry) +{ + /* remove from rbtree */ + zswap_rb_erase(&tree->rbroot, entry); + + /* drop the initial reference from entry creation */ + zswap_entry_put(tree, entry); +} + /* * returns 0 if the page was successfully decompressed * return -1 on entry not found or error */ static int zswap_frontswap_load(unsigned type, pgoff_t offset, - struct page *page) + struct page *page, bool *exclusive) { struct zswap_tree *tree = zswap_trees[type]; struct zswap_entry *entry; @@ -1415,6 +1429,10 @@ stats: freeentry: spin_lock(&tree->lock); zswap_entry_put(tree, entry); + if (!ret && zswap_exclusive_loads_enabled) { + zswap_invalidate_entry(tree, entry); + *exclusive = true; + } spin_unlock(&tree->lock); return ret; @@ -1434,13 +1452,7 @@ static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset) spin_unlock(&tree->lock); return; } - - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); - + zswap_invalidate_entry(tree, entry); spin_unlock(&tree->lock); } -- cgit v1.2.3 From be5e015d107d5336f298b74ea5a4f0b1773bc6f9 Mon Sep 17 00:00:00 2001 From: Marcelo Tosatti Date: Wed, 7 Jun 2023 17:28:07 -0300 Subject: vmstat: skip periodic vmstat update for isolated CPUs Problem: The interruption caused by vmstat_update is undesirable for certain applications. 
With workloads that are running on isolated cpus with nohz full mode to shield off any kernel interruption. For example, a VM running a time sensitive application with a 50us maximum acceptable interruption (use case: soft PLC). oslat 1094.456862: sys_mlock(start: 7f7ed0000b60, len: 1000) oslat 1094.456971: workqueue_queue_work: ... function=vmstat_update ... oslat 1094.456974: sched_switch: prev_comm=oslat ... ==> next_comm=kworker/5:1 ... kworker 1094.456978: sched_switch: prev_comm=kworker/5:1 ==> next_comm=oslat ... The example above shows an additional 7us for the oslat -> kworker -> oslat switches. In the case of a virtualized CPU, and the vmstat_update interruption in the host (of a qemu-kvm vcpu), the latency penalty observed in the guest is higher than 50us, violating the acceptable latency threshold. The isolated vCPU can perform operations that modify per-CPU page counters, for example to complete I/O operations: CPU 11/KVM-9540 [001] dNh1. 2314.248584: mod_zone_page_state <-__folio_end_writeback CPU 11/KVM-9540 [001] dNh1. 2314.248585: => 0xffffffffc042b083 => mod_zone_page_state => __folio_end_writeback => folio_end_writeback => iomap_finish_ioend => blk_mq_end_request_batch => nvme_irq => __handle_irq_event_percpu => handle_irq_event => handle_edge_irq => __common_interrupt => common_interrupt => asm_common_interrupt => vmx_do_interrupt_nmi_irqoff => vmx_handle_exit_irqoff => vcpu_enter_guest => vcpu_run => kvm_arch_vcpu_ioctl_run => kvm_vcpu_ioctl => __x64_sys_ioctl => do_syscall_64 => entry_SYSCALL_64_after_hwframe In kernel users of vmstat counters either require the precise value and they are using zone_page_state_snapshot interface or they can live with an imprecision as the regular flushing can happen at arbitrary time and cumulative error can grow (see calculate_normal_threshold). From that POV the regular flushing can be postponed for CPUs that have been isolated from the kernel interference without critical infrastructure ever noticing. Skip regular flushing from vmstat_shepherd for all isolated CPUs to avoid interference with the isolated workload. Suggested by Michal Hocko. Link: https://lkml.kernel.org/r/ZIDoV/zxFKVmQl7W@tpad Signed-off-by: Marcelo Tosatti Acked-by: Michal Hocko Cc: Frederic Weisbecker Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/vmstat.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'mm') diff --git a/mm/vmstat.c b/mm/vmstat.c index c28046371b45..c7499e3ee9d5 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -28,6 +28,7 @@ #include #include #include +#include #include "internal.h" @@ -2022,6 +2023,20 @@ static void vmstat_shepherd(struct work_struct *w) for_each_online_cpu(cpu) { struct delayed_work *dw = &per_cpu(vmstat_work, cpu); + /* + * In kernel users of vmstat counters either require the precise value and + * they are using zone_page_state_snapshot interface or they can live with + * an imprecision as the regular flushing can happen at arbitrary time and + * cumulative error can grow (see calculate_normal_threshold). + * + * From that POV the regular flushing can be postponed for CPUs that have + * been isolated from the kernel interference without critical + * infrastructure ever noticing. Skip regular flushing from vmstat_shepherd + * for all isolated CPUs to avoid interference with the isolated workload. 
+ */ + if (cpu_is_isolated(cpu)) + continue; + if (!delayed_work_pending(dw) && need_update(cpu)) queue_delayed_work_on(cpu, mm_percpu_wq, dw, 0); -- cgit v1.2.3 From e3b7bf972d632288ccad95b116628e3141be676e Mon Sep 17 00:00:00 2001 From: Tarun Sahu Date: Fri, 9 Jun 2023 21:59:07 +0530 Subject: mm/folio: avoid special handling for order value 0 in folio_set_order folio_set_order(folio, 0) is used in the kernel at two places: __destroy_compound_gigantic_folio and __prep_compound_gigantic_folio. Currently, it is called to clear out folio->_folio_nr_pages and folio->_folio_order. For __destroy_compound_gigantic_folio: in the past, folio_set_order(folio, 0) was needed because page->mapping used to overlap with _folio_nr_pages and _folio_order. So if these fields were left uncleared when freeing gigantic hugepages, they caused "BUG: bad page state" due to a non-zero page->mapping. Now, after commit a01f43901cfb ("hugetlb: be sure to free demoted CMA pages to CMA"), page->mapping is explicitly cleared for tail pages. Also, _folio_order and _folio_nr_pages no longer overlap with page->mapping. So folio_set_order(folio, 0) can be removed from the gigantic folio freeing path (__destroy_compound_gigantic_folio). At the other place, folio_set_order(folio, 0) is called in the error path of __prep_compound_gigantic_folio. Here it can also be removed if we move folio_set_order(folio, order) after the for loop. The patch also moves the __folio_set_head call in __prep_compound_gigantic_folio() so that neither needs to be cleared in the error path. Also, as Mike pointed out: "It would actually be better to move the calls _folio_set_head and folio_set_order in __prep_compound_gigantic_folio() as suggested here. Why? In the current code, the ref count on the 'head page' is still 1 (or more) while those calls are made. So, someone could take a speculative ref on the page BEFORE the tail pages are set up." This way, folio_set_order(folio, 0) is no longer needed, and it also removes the confusion of the folio order being set to 0 (as the _folio_order field is part of the first tail page). Testing: I have run the LTP tests, which all pass. I have also written an LTP test which exercises the bug caused by compound_nr and page->mapping overlapping. https://github.com/linux-test-project/ltp/blob/master/testcases/kernel/mem/hugetlb/hugemmap/hugemmap32.c Running on an older kernel (< 5.10-rc7) with the above bug, the test fails, while on a newer kernel, and also with this patch, it passes.
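To make the ordering point concrete, here is a heavily abridged sketch (not the real function; the tail-page validation, refcount freezing and error handling are omitted, and the name example_prep_gigantic_folio is made up) of the shape __prep_compound_gigantic_folio() takes after this patch: the head flag and the order are written only once every tail page has been prepared, so the error path never has head/order state to undo and folio_set_order(folio, 0) disappears.

/* Abridged, illustrative shape only. */
static bool example_prep_gigantic_folio(struct folio *folio, unsigned int order)
{
	unsigned long i, nr_pages = 1UL << order;

	for (i = 0; i < nr_pages; i++) {
		/*
		 * Validate and prepare folio_page(folio, i); on failure,
		 * undo the tail pages prepared so far and return false.
		 * Nothing head- or order-related has been set yet.
		 */
	}
	/* Only now publish the compound metadata. */
	__folio_set_head(folio);
	folio_set_order(folio, order);
	return true;
}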
Link: https://lkml.kernel.org/r/20230609162907.111756-1-tsahu@linux.ibm.com Signed-off-by: Tarun Sahu Reviewed-by: Mike Kravetz Cc: Aneesh Kumar K.V Cc: Gerald Schaefer Cc: Matthew Wilcox Cc: Muchun Song Cc: Sidhartha Kumar Signed-off-by: Andrew Morton --- mm/hugetlb.c | 9 +++------ mm/internal.h | 8 ++------ 2 files changed, 5 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index dfa412d8cb30..de96cd936b12 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -1489,7 +1489,6 @@ static void __destroy_compound_gigantic_folio(struct folio *folio, set_page_refcounted(p); } - folio_set_order(folio, 0); __folio_clear_head(folio); } @@ -1951,9 +1950,6 @@ static bool __prep_compound_gigantic_folio(struct folio *folio, struct page *p; __folio_clear_reserved(folio); - __folio_set_head(folio); - /* we rely on prep_new_hugetlb_folio to set the destructor */ - folio_set_order(folio, order); for (i = 0; i < nr_pages; i++) { p = folio_page(folio, i); @@ -1999,6 +1995,9 @@ static bool __prep_compound_gigantic_folio(struct folio *folio, if (i != 0) set_compound_head(p, &folio->page); } + __folio_set_head(folio); + /* we rely on prep_new_hugetlb_folio to set the destructor */ + folio_set_order(folio, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); @@ -2017,8 +2016,6 @@ out_error: p = folio_page(folio, j); __ClearPageReserved(p); } - folio_set_order(folio, 0); - __folio_clear_head(folio); return false; } diff --git a/mm/internal.h b/mm/internal.h index f45f5eb4514f..faf0508d89a5 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -438,16 +438,12 @@ int split_free_page(struct page *free_page, */ static inline void folio_set_order(struct folio *folio, unsigned int order) { - if (WARN_ON_ONCE(!folio_test_large(folio))) + if (WARN_ON_ONCE(!order || !folio_test_large(folio))) return; folio->_folio_order = order; #ifdef CONFIG_64BIT - /* - * When hugetlb dissolves a folio, we need to clear the tail - * page, rather than setting nr_pages to 1. - */ - folio->_folio_nr_pages = order ? 1U << order : 0; + folio->_folio_nr_pages = 1U << order; #endif } -- cgit v1.2.3 From 0e4bc271110e0c58c010071a9bbf150f39851dac Mon Sep 17 00:00:00 2001 From: Lu Hongfei Date: Fri, 9 Jun 2023 17:30:57 +0800 Subject: mm/vmalloc: replace the ternary conditional operator with min() It would be better to replace the traditional ternary conditional operator with min() in zero_iter Link: https://lkml.kernel.org/r/20230609093057.27777-1-luhongfei@vivo.com Signed-off-by: Lu Hongfei Reviewed-by: Lorenzo Stoakes Reviewed-by: David Hildenbrand Cc: Christoph Hellwig Cc: Uladzislau Rezki (Sony) Signed-off-by: Andrew Morton --- mm/vmalloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 9d64a4098c36..d07d0140ebd3 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -3562,7 +3562,7 @@ static size_t zero_iter(struct iov_iter *iter, size_t count) while (remains > 0) { size_t num, copied; - num = remains < PAGE_SIZE ? 
remains : PAGE_SIZE; + num = min_t(size_t, remains, PAGE_SIZE); copied = copy_page_to_iter_nofault(ZERO_PAGE(0), 0, num, iter); remains -= copied; -- cgit v1.2.3 From 26e1a0c3277d7f43856ec424902423be212cc178 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:06:53 -0700 Subject: mm: use pmdp_get_lockless() without surplus barrier() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "mm: allow pte_offset_map[_lock]() to fail", v2. What is it all about? Some mmap_lock avoidance i.e. latency reduction. Initially just for the case of collapsing shmem or file pages to THPs; but likely to be relied upon later in other contexts e.g. freeing of empty page tables (but that's not work I'm doing). mmap_write_lock avoidance when collapsing to anon THPs? Perhaps, but again that's not work I've done: a quick attempt was not as easy as the shmem/file case. I would much prefer not to have to make these small but wide-ranging changes for such a niche case; but failed to find another way, and have heard that shmem MADV_COLLAPSE's usefulness is being limited by that mmap_write_lock it currently requires. These changes (though of course not these exact patches) have been in Google's data centre kernel for three years now: we do rely upon them. What is this preparatory series about? The current mmap locking will not be enough to guard against that tricky transition between pmd entry pointing to page table, and empty pmd entry, and pmd entry pointing to huge page: pte_offset_map() will have to validate the pmd entry for itself, returning NULL if no page table is there. What to do about that varies: sometimes nearby error handling indicates just to skip it; but in many cases an ACTION_AGAIN or "goto again" is appropriate (and if that risks an infinite loop, then there must have been an oops, or pfn 0 mistaken for page table, before). Given the likely extension to freeing empty page tables, I have not limited this set of changes to a THP config; and it has been easier, and sets a better example, if each site is given appropriate handling: even where deeper study might prove that failure could only happen if the pmd table were corrupted. Several of the patches are, or include, cleanup on the way; and by the end, pmd_trans_unstable() and suchlike are deleted: pte_offset_map() and pte_offset_map_lock() then handle those original races and more. Most uses of pte_lockptr() are deprecated, with pte_offset_map_nolock() taking its place. This patch (of 32): Use pmdp_get_lockless() in preference to READ_ONCE(*pmdp), to get a more reliable result with PAE (or READ_ONCE as before without PAE); and remove the unnecessary extra barrier()s which got left behind in its callers. HOWEVER: Note the small print in linux/pgtable.h, where it was designed specifically for fast GUP, and depends on interrupts being disabled for its full guarantee: most callers which have been added (here and before) do NOT have interrupts disabled, so there is still some need for caution. Link: https://lkml.kernel.org/r/f35279a9-9ac0-de22-d245-591afbfb4dc@google.com Signed-off-by: Hugh Dickins Acked-by: Yu Zhao Acked-by: Peter Xu Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Zack Rusin Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 10 +--------- include/linux/pgtable.h | 17 ----------------- mm/gup.c | 6 +----- mm/hmm.c | 2 +- mm/khugepaged.c | 5 ----- mm/ksm.c | 3 +-- mm/memory.c | 14 ++------------ mm/mprotect.c | 5 ----- mm/page_vma_mapped.c | 2 +- 9 files changed, 7 insertions(+), 57 deletions(-) (limited to 'mm') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 0fd96d6e39ce..f7a0817b1ec0 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -349,15 +349,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, if (!pud_present(*pud)) goto out; pmd = pmd_offset(pud, address); - /* - * READ_ONCE must function as a barrier with narrower scope - * and it must be equivalent to: - * _pmd = *pmd; barrier(); - * - * This is to deal with the instability (as in - * pmd_trans_unstable) of the pmd. - */ - _pmd = READ_ONCE(*pmd); + _pmd = pmdp_get_lockless(pmd); if (pmd_none(_pmd)) goto out; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index c5a51481bbb9..8ec27fe69dc8 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1344,23 +1344,6 @@ static inline int pud_trans_unstable(pud_t *pud) static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) { pmd_t pmdval = pmdp_get_lockless(pmd); - /* - * The barrier will stabilize the pmdval in a register or on - * the stack so that it will stop changing under the code. - * - * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE, - * pmdp_get_lockless is allowed to return a not atomic pmdval - * (for example pointing to an hugepage that has never been - * mapped in the pmd). The below checks will only care about - * the low part of the pmd with 32bit PAE x86 anyway, with the - * exception of pmd_none(). So the important thing is that if - * the low part of the pmd is found null, the high part will - * be also null or the pmd_none() check below would be - * confused. - */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - barrier(); -#endif /* * !pmd_present() checks for pmd migration entries * diff --git a/mm/gup.c b/mm/gup.c index a718b956edbe..d448fd286b8c 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -654,11 +654,7 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; pmd = pmd_offset(pudp, address); - /* - * The READ_ONCE() will stabilize the pmdval in a register or - * on the stack so that it will stop changing under the code. 
- */ - pmdval = READ_ONCE(*pmd); + pmdval = pmdp_get_lockless(pmd); if (pmd_none(pmdval)) return no_page_table(vma, flags); if (!pmd_present(pmdval)) diff --git a/mm/hmm.c b/mm/hmm.c index 6a151c09de5e..e23043345615 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -332,7 +332,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp, pmd_t pmd; again: - pmd = READ_ONCE(*pmdp); + pmd = pmdp_get_lockless(pmdp); if (pmd_none(pmd)) return hmm_vma_walk_hole(start, end, -1, walk); diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 3649ba12a235..2d206e62d358 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -959,11 +959,6 @@ static int find_pmd_or_thp_or_none(struct mm_struct *mm, return SCAN_PMD_NULL; pmde = pmdp_get_lockless(*pmd); - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - /* See comments in pmd_none_or_trans_huge_or_clear_bad() */ - barrier(); -#endif if (pmd_none(pmde)) return SCAN_PMD_NONE; if (!pmd_present(pmde)) diff --git a/mm/ksm.c b/mm/ksm.c index 0156bded3a66..df2aa281d49d 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -1194,8 +1194,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, * without holding anon_vma lock for write. So when looking for a * genuine pmde (in which to find pte), test present and !THP together. */ - pmde = *pmd; - barrier(); + pmde = pmdp_get_lockless(pmd); if (!pmd_present(pmde) || pmd_trans_huge(pmde)) goto out; diff --git a/mm/memory.c b/mm/memory.c index 36082fd42df4..221b21623644 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -4923,18 +4923,9 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) * So now it's safe to run pte_offset_map(). */ vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - vmf->orig_pte = *vmf->pte; + vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; - /* - * some architectures can have larger ptes than wordsize, - * e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and - * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic - * accesses. The code below just needs a consistent view - * for the ifs and we later double check anyway with the - * ptl lock held. So here a barrier will do. - */ - barrier(); if (pte_none(vmf->orig_pte)) { pte_unmap(vmf->pte); vmf->pte = NULL; @@ -5058,9 +5049,8 @@ retry_pud: if (!(ret & VM_FAULT_FALLBACK)) return ret; } else { - vmf.orig_pmd = *vmf.pmd; + vmf.orig_pmd = pmdp_get_lockless(vmf.pmd); - barrier(); if (unlikely(is_swap_pmd(vmf.orig_pmd))) { VM_BUG_ON(thp_migration_supported() && !is_pmd_migration_entry(vmf.orig_pmd)); diff --git a/mm/mprotect.c b/mm/mprotect.c index 92d3d3ca390a..c5a13c0f1017 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -309,11 +309,6 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) { pmd_t pmdval = pmdp_get_lockless(pmd); - /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - barrier(); -#endif - if (pmd_none(pmdval)) return 1; if (pmd_trans_huge(pmdval)) diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 4e448cfbc6ef..64aff6718bdb 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -210,7 +210,7 @@ restart: * compiler and used as a stale value after we've observed a * subsequent update. 
*/ - pmde = READ_ONCE(*pvmw->pmd); + pmde = pmdp_get_lockless(pvmw->pmd); if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde) || (pmd_present(pmde) && pmd_devmap(pmde))) { -- cgit v1.2.3 From 0cb8fd4d14165a7e654048e43983d86f75b90879 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:08:20 -0700 Subject: mm/migrate: remove cruft from migration_entry_wait()s MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit migration_entry_wait_on_locked() does not need to take a mapped pte pointer, its callers can do the unmap first. Annotate it with __releases(ptl) to reduce sparse warnings. Fold __migration_entry_wait_huge() into migration_entry_wait_huge(). Fold __migration_entry_wait() into migration_entry_wait(), preferring the tighter pte_offset_map_lock() to pte_offset_map() and pte_lockptr(). Link: https://lkml.kernel.org/r/b0e2a532-cdf2-561b-e999-f3b13b8d6d3@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/migrate.h | 4 ++-- include/linux/swapops.h | 17 +++-------------- mm/filemap.c | 13 ++++--------- mm/migrate.c | 37 +++++++++++++------------------------ 4 files changed, 22 insertions(+), 49 deletions(-) (limited to 'mm') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6de5756d8533..711dd9412561 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -75,8 +75,8 @@ bool isolate_movable_page(struct page *page, isolate_mode_t mode); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src); -void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, - spinlock_t *ptl); +void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) + __releases(ptl); void folio_migrate_flags(struct folio *newfolio, struct folio *folio); void folio_migrate_copy(struct folio *newfolio, struct folio *folio); int folio_migrate_mapping(struct address_space *mapping, diff --git a/include/linux/swapops.h b/include/linux/swapops.h index 3a451b7afcb3..4c932cb45e0b 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -332,15 +332,9 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry) return false; } -extern void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl); extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); -#ifdef CONFIG_HUGETLB_PAGE -extern void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl); extern void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte); -#endif /* CONFIG_HUGETLB_PAGE */ #else /* CONFIG_MIGRATION */ static inline swp_entry_t make_readable_migration_entry(pgoff_t offset) { @@ -362,15 +356,10 @@ static inline int is_migration_entry(swp_entry_t swp) return 0; } -static inline void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) { } 
static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) { } -#ifdef CONFIG_HUGETLB_PAGE -static inline void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl) { } -static inline void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) { } -#endif /* CONFIG_HUGETLB_PAGE */ + unsigned long address) { } +static inline void migration_entry_wait_huge(struct vm_area_struct *vma, + pte_t *pte) { } static inline int is_writable_migration_entry(swp_entry_t entry) { return 0; diff --git a/mm/filemap.c b/mm/filemap.c index 916b7c6444fe..e0259fb823a5 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1362,8 +1362,6 @@ repeat: /** * migration_entry_wait_on_locked - Wait for a migration entry to be removed * @entry: migration swap entry. - * @ptep: mapped pte pointer. Will return with the ptep unmapped. Only required - * for pte entries, pass NULL for pmd entries. * @ptl: already locked ptl. This function will drop the lock. * * Wait for a migration entry referencing the given page to be removed. This is @@ -1372,13 +1370,13 @@ repeat: * should be called while holding the ptl for the migration entry referencing * the page. * - * Returns after unmapping and unlocking the pte/ptl with pte_unmap_unlock(). + * Returns after unlocking the ptl. * * This follows the same logic as folio_wait_bit_common() so see the comments * there. */ -void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait_on_locked(swp_entry_t entry, spinlock_t *ptl) + __releases(ptl) { struct wait_page_queue wait_page; wait_queue_entry_t *wait = &wait_page.wait; @@ -1412,10 +1410,7 @@ void migration_entry_wait_on_locked(swp_entry_t entry, pte_t *ptep, * a valid reference to the page, and it must take the ptl to remove the * migration entry. So the page is valid until the ptl is dropped. */ - if (ptep) - pte_unmap_unlock(ptep, ptl); - else - spin_unlock(ptl); + spin_unlock(ptl); for (;;) { unsigned int flags; diff --git a/mm/migrate.c b/mm/migrate.c index 30b5ce10935e..c1f2c40441e1 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -296,14 +296,18 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked) * get to the page and wait until migration is finished. * When we return from this function the fault will be retried. */ -void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, - spinlock_t *ptl) +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) { + spinlock_t *ptl; + pte_t *ptep; pte_t pte; swp_entry_t entry; - spin_lock(ptl); + ptep = pte_offset_map_lock(mm, pmd, address, &ptl); pte = *ptep; + pte_unmap(ptep); + if (!is_swap_pte(pte)) goto out; @@ -311,18 +315,10 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, if (!is_migration_entry(entry)) goto out; - migration_entry_wait_on_locked(entry, ptep, ptl); + migration_entry_wait_on_locked(entry, ptl); return; out: - pte_unmap_unlock(ptep, ptl); -} - -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) -{ - spinlock_t *ptl = pte_lockptr(mm, pmd); - pte_t *ptep = pte_offset_map(pmd, address); - __migration_entry_wait(mm, ptep, ptl); + spin_unlock(ptl); } #ifdef CONFIG_HUGETLB_PAGE @@ -332,9 +328,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, * * This function will release the vma lock before returning. 
*/ -void __migration_entry_wait_huge(struct vm_area_struct *vma, - pte_t *ptep, spinlock_t *ptl) +void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *ptep) { + spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, ptep); pte_t pte; hugetlb_vma_assert_locked(vma); @@ -352,16 +348,9 @@ void __migration_entry_wait_huge(struct vm_area_struct *vma, * lock release in migration_entry_wait_on_locked(). */ hugetlb_vma_unlock_read(vma); - migration_entry_wait_on_locked(pte_to_swp_entry(pte), NULL, ptl); + migration_entry_wait_on_locked(pte_to_swp_entry(pte), ptl); } } - -void migration_entry_wait_huge(struct vm_area_struct *vma, pte_t *pte) -{ - spinlock_t *ptl = huge_pte_lockptr(hstate_vma(vma), vma->vm_mm, pte); - - __migration_entry_wait_huge(vma, pte, ptl); -} #endif #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION @@ -372,7 +361,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd) ptl = pmd_lock(mm, pmd); if (!is_pmd_migration_entry(*pmd)) goto unlock; - migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), NULL, ptl); + migration_entry_wait_on_locked(pmd_to_swp_entry(*pmd), ptl); return; unlock: spin_unlock(ptl); -- cgit v1.2.3 From 0d940a9b270b9220dcff74d8e9123c9788365751 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:10:32 -0700 Subject: mm/pgtable: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make pte_offset_map() a wrapper for __pte_offset_map() (optionally outputs pmdval), pte_offset_map_lock() a sparse __cond_lock wrapper for __pte_offset_map_lock(): those __funcs added in mm/pgtable-generic.c. __pte_offset_map() do pmdval validation (including pmd_clear_bad() when pmd_bad()), returning NULL if pmdval is not for a page table. __pte_offset_map_lock() verify pmdval unchanged after getting the lock, trying again if it changed. No #ifdef CONFIG_TRANSPARENT_HUGEPAGE around them: that could be done to cover the imminent case, but we expect to generalize it later, and it makes a mess of where to do the pmd_bad() clearing. Add pte_offset_map_nolock(): outputs ptl like pte_offset_map_lock(), without actually taking the lock. This will be preferred to open uses of pte_lockptr(), because (when split ptlock is in page table's struct page) it points to the right lock for the returned pte pointer, even if *pmd gets changed racily afterwards. Update corresponding Documentation. Do not add the anticipated rcu_read_lock() and rcu_read_unlock()s yet: they have to wait until all architectures are balancing pte_offset_map()s with pte_unmap()s (as in the arch series posted earlier). But comment where they will go, so that it's easy to add them for experiments. And only when those are in place can transient racy failure cases be enabled. Add more safety for the PAE mismatched pmd_low pmd_high case at that time. Link: https://lkml.kernel.org/r/2929bfd-9893-a374-e463-4c3127ff9b9d@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- Documentation/mm/split_page_table_lock.rst | 17 +++++---- include/linux/mm.h | 27 +++++++++----- include/linux/pgtable.h | 22 ++++++++---- mm/pgtable-generic.c | 56 ++++++++++++++++++++++++++++++ 4 files changed, 101 insertions(+), 21 deletions(-) (limited to 'mm') diff --git a/Documentation/mm/split_page_table_lock.rst b/Documentation/mm/split_page_table_lock.rst index 50ee0dfc95be..a834fad9de12 100644 --- a/Documentation/mm/split_page_table_lock.rst +++ b/Documentation/mm/split_page_table_lock.rst @@ -14,15 +14,20 @@ tables. Access to higher level tables protected by mm->page_table_lock. There are helpers to lock/unlock a table and other accessor functions: - pte_offset_map_lock() - maps pte and takes PTE table lock, returns pointer to the taken - lock; + maps PTE and takes PTE table lock, returns pointer to PTE with + pointer to its PTE table lock, or returns NULL if no PTE table; + - pte_offset_map_nolock() + maps PTE, returns pointer to PTE with pointer to its PTE table + lock (not taken), or returns NULL if no PTE table; + - pte_offset_map() + maps PTE, returns pointer to PTE, or returns NULL if no PTE table; + - pte_unmap() + unmaps PTE table; - pte_unmap_unlock() unlocks and unmaps PTE table; - pte_alloc_map_lock() - allocates PTE table if needed and take the lock, returns pointer - to taken lock or NULL if allocation failed; - - pte_lockptr() - returns pointer to PTE table lock; + allocates PTE table if needed and takes its lock, returns pointer to + PTE with pointer to its lock, or returns NULL if allocation failed; - pmd_lock() takes PMD table lock, returns pointer to taken lock; - pmd_lockptr() diff --git a/include/linux/mm.h b/include/linux/mm.h index 66032f0d515c..a08dc8cc48fb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2827,14 +2827,25 @@ static inline void pgtable_pte_page_dtor(struct page *page) dec_lruvec_page_state(page, NR_PAGETABLE); } -#define pte_offset_map_lock(mm, pmd, address, ptlp) \ -({ \ - spinlock_t *__ptl = pte_lockptr(mm, pmd); \ - pte_t *__pte = pte_offset_map(pmd, address); \ - *(ptlp) = __ptl; \ - spin_lock(__ptl); \ - __pte; \ -}) +pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp); +static inline pte_t *pte_offset_map(pmd_t *pmd, unsigned long addr) +{ + return __pte_offset_map(pmd, addr, NULL); +} + +pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); +static inline pte_t *pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + pte_t *pte; + + __cond_lock(*ptlp, pte = __pte_offset_map_lock(mm, pmd, addr, ptlp)); + return pte; +} + +pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp); #define pte_unmap_unlock(pte, ptl) do { \ spin_unlock(ptl); \ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 94235ff2706e..3fabbb018557 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -94,14 +94,22 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address) #define pte_offset_kernel pte_offset_kernel 
#endif -#if defined(CONFIG_HIGHPTE) -#define pte_offset_map(dir, address) \ - ((pte_t *)kmap_local_page(pmd_page(*(dir))) + \ - pte_index((address))) -#define pte_unmap(pte) kunmap_local((pte)) +#ifdef CONFIG_HIGHPTE +#define __pte_map(pmd, address) \ + ((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address))) +#define pte_unmap(pte) do { \ + kunmap_local((pte)); \ + /* rcu_read_unlock() to be added later */ \ +} while (0) #else -#define pte_offset_map(dir, address) pte_offset_kernel((dir), (address)) -#define pte_unmap(pte) ((void)(pte)) /* NOP */ +static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address) +{ + return pte_offset_kernel(pmd, address); +} +static inline void pte_unmap(pte_t *pte) +{ + /* rcu_read_unlock() to be added later */ +} #endif /* Find an entry in the second-level page table.. */ diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index d2fc52bffafc..c7ab18a5fb77 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include #include @@ -229,3 +231,57 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address, } #endif #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp) +{ + pmd_t pmdval; + + /* rcu_read_lock() to be added later */ + pmdval = pmdp_get_lockless(pmd); + if (pmdvalp) + *pmdvalp = pmdval; + if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval))) + goto nomap; + if (unlikely(pmd_trans_huge(pmdval) || pmd_devmap(pmdval))) + goto nomap; + if (unlikely(pmd_bad(pmdval))) { + pmd_clear_bad(pmd); + goto nomap; + } + return __pte_map(&pmdval, addr); +nomap: + /* rcu_read_unlock() to be added later */ + return NULL; +} + +pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + pmd_t pmdval; + pte_t *pte; + + pte = __pte_offset_map(pmd, addr, &pmdval); + if (likely(pte)) + *ptlp = pte_lockptr(mm, &pmdval); + return pte; +} + +pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd, + unsigned long addr, spinlock_t **ptlp) +{ + spinlock_t *ptl; + pmd_t pmdval; + pte_t *pte; +again: + pte = __pte_offset_map(pmd, addr, &pmdval); + if (unlikely(!pte)) + return pte; + ptl = pte_lockptr(mm, &pmdval); + spin_lock(ptl); + if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) { + *ptlp = ptl; + return pte; + } + pte_unmap_unlock(pte, ptl); + goto again; +} -- cgit v1.2.3 From 65747aaf42b7db6acb8e57a2b8e9959928f404dd Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:11:29 -0700 Subject: mm/filemap: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit filemap_map_pages() allow pte_offset_map_lock() to fail; and remove the pmd_devmap_trans_unstable() check from filemap_map_pmd(), which can safely return to filemap_map_pages() and let pte_offset_map_lock() discover that. Link: https://lkml.kernel.org/r/54607cf4-ddb6-7ef3-043-1d2de1a9a71@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/filemap.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index e0259fb823a5..1893048ec9ff 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3414,13 +3414,6 @@ static bool filemap_map_pmd(struct vm_fault *vmf, struct folio *folio, if (pmd_none(*vmf->pmd)) pmd_install(mm, vmf->pmd, &vmf->prealloc_pte); - /* See comment in handle_pte_fault() */ - if (pmd_devmap_trans_unstable(vmf->pmd)) { - folio_unlock(folio); - folio_put(folio); - return true; - } - return false; } @@ -3507,6 +3500,11 @@ vm_fault_t filemap_map_pages(struct vm_fault *vmf, addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl); + if (!vmf->pte) { + folio_unlock(folio); + folio_put(folio); + goto out; + } do { again: page = folio_file_page(folio, xas.xa_index); -- cgit v1.2.3 From 45fe85e9811ede2d65b21724cae50d6a0563e452 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:12:52 -0700 Subject: mm/page_vma_mapped: delete bogosity in page_vma_mapped_walk() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Revert commit a7a69d8ba88d ("mm/thp: another PVMW_SYNC fix in page_vma_mapped_walk()"): I was proud of that "Aha!" commit at the time, but in revisiting page_vma_mapped_walk() for pte_offset_map() failure, that block raised a doubt: and it now seems utterly bogus. The prior map_pte() has taken ptl unconditionally when PVMW_SYNC: I must have forgotten that when making the change. It did no harm, but could not have fixed a BUG or WARN, and is hard to reconcile with coming changes. Link: https://lkml.kernel.org/r/87475a22-e59e-2d8b-d78a-df376d314bd@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 64aff6718bdb..007dc7456f0e 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -275,10 +275,6 @@ next_pte: goto restart; } pvmw->pte++; - if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) { - pvmw->ptl = pte_lockptr(mm, pvmw->pmd); - spin_lock(pvmw->ptl); - } } while (pte_none(*pvmw->pte)); if (!pvmw->ptl) { -- cgit v1.2.3 From 90f43b0a13cddb09e2686f4d976751c0a9b8b197 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:14:12 -0700 Subject: mm/page_vma_mapped: reformat map_pte() with less indentation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No functional change here, but adjust the format of map_pte() so that the following commit will be easier to read: separate out the PVMW_SYNC case first, and remove two levels of indentation from the ZONE_DEVICE case. Link: https://lkml.kernel.org/r/bf723f59-e3fc-6839-1cc3-c0631ee248bc@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 65 +++++++++++++++++++++++++++------------------------- 1 file changed, 34 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 007dc7456f0e..947dc7491815 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -15,38 +15,41 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) static bool map_pte(struct page_vma_mapped_walk *pvmw) { - pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); - if (!(pvmw->flags & PVMW_SYNC)) { - if (pvmw->flags & PVMW_MIGRATION) { - if (!is_swap_pte(*pvmw->pte)) - return false; - } else { - /* - * We get here when we are trying to unmap a private - * device page from the process address space. Such - * page is not CPU accessible and thus is mapped as - * a special swap entry, nonetheless it still does - * count as a valid regular mapping for the page (and - * is accounted as such in page maps count). - * - * So handle this special case as if it was a normal - * page mapping ie lock CPU page table and returns - * true. - * - * For more details on device private memory see HMM - * (include/linux/hmm.h or mm/hmm.c). 
- */ - if (is_swap_pte(*pvmw->pte)) { - swp_entry_t entry; + if (pvmw->flags & PVMW_SYNC) { + /* Use the stricter lookup */ + pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, + pvmw->address, &pvmw->ptl); + return true; + } - /* Handle un-addressable ZONE_DEVICE memory */ - entry = pte_to_swp_entry(*pvmw->pte); - if (!is_device_private_entry(entry) && - !is_device_exclusive_entry(entry)) - return false; - } else if (!pte_present(*pvmw->pte)) - return false; - } + pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); + if (pvmw->flags & PVMW_MIGRATION) { + if (!is_swap_pte(*pvmw->pte)) + return false; + } else if (is_swap_pte(*pvmw->pte)) { + swp_entry_t entry; + /* + * Handle un-addressable ZONE_DEVICE memory. + * + * We get here when we are trying to unmap a private + * device page from the process address space. Such + * page is not CPU accessible and thus is mapped as + * a special swap entry, nonetheless it still does + * count as a valid regular mapping for the page + * (and is accounted as such in page maps count). + * + * So handle this special case as if it was a normal + * page mapping ie lock CPU page table and return true. + * + * For more details on device private memory see HMM + * (include/linux/hmm.h or mm/hmm.c). + */ + entry = pte_to_swp_entry(*pvmw->pte); + if (!is_device_private_entry(entry) && + !is_device_exclusive_entry(entry)) + return false; + } else if (!pte_present(*pvmw->pte)) { + return false; } pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); spin_lock(pvmw->ptl); -- cgit v1.2.3 From 2798bbe75b9c2752b46d292e5c2a49f49da36418 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:15:43 -0700 Subject: mm/page_vma_mapped: pte_offset_map_nolock() not pte_lockptr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit map_pte() use pte_offset_map_nolock(), to make sure of the ptl belonging to pte, even if pmd entry is then changed racily: page_vma_mapped_walk() use that instead of getting pte_lockptr() later, or restart if map_pte() found no page table. Link: https://lkml.kernel.org/r/cba186e0-5ed7-e81b-6cd-dade4c33c248@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/page_vma_mapped.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 947dc7491815..2af734274073 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -13,16 +13,28 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) return false; } -static bool map_pte(struct page_vma_mapped_walk *pvmw) +static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) { if (pvmw->flags & PVMW_SYNC) { /* Use the stricter lookup */ pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, pvmw->address, &pvmw->ptl); - return true; + *ptlp = pvmw->ptl; + return !!pvmw->pte; } - pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address); + /* + * It is important to return the ptl corresponding to pte, + * in case *pvmw->pmd changes underneath us; so we need to + * return it even when choosing not to lock, in case caller + * proceeds to loop over next ptes, and finds a match later. + * Though, in most cases, page lock already protects this. + */ + pvmw->pte = pte_offset_map_nolock(pvmw->vma->vm_mm, pvmw->pmd, + pvmw->address, ptlp); + if (!pvmw->pte) + return false; + if (pvmw->flags & PVMW_MIGRATION) { if (!is_swap_pte(*pvmw->pte)) return false; @@ -51,7 +63,7 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw) } else if (!pte_present(*pvmw->pte)) { return false; } - pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd); + pvmw->ptl = *ptlp; spin_lock(pvmw->ptl); return true; } @@ -156,6 +168,7 @@ bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw) struct vm_area_struct *vma = pvmw->vma; struct mm_struct *mm = vma->vm_mm; unsigned long end; + spinlock_t *ptl; pgd_t *pgd; p4d_t *p4d; pud_t *pud; @@ -257,8 +270,11 @@ restart: step_forward(pvmw, PMD_SIZE); continue; } - if (!map_pte(pvmw)) + if (!map_pte(pvmw, &ptl)) { + if (!pvmw->pte) + goto restart; goto next_pte; + } this_pte: if (check_pte(pvmw)) return true; @@ -281,7 +297,7 @@ next_pte: } while (pte_none(*pvmw->pte)); if (!pvmw->ptl) { - pvmw->ptl = pte_lockptr(mm, pvmw->pmd); + pvmw->ptl = ptl; spin_lock(pvmw->ptl); } goto this_pte; -- cgit v1.2.3 From 7780d04046a2288ab85d88bedacc60fa4fad9971 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:17:26 -0700 Subject: mm/pagewalkers: ACTION_AGAIN if pte_offset_map_lock() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Simple walk_page_range() users should set ACTION_AGAIN to retry when pte_offset_map_lock() fails. No need to check pmd_trans_unstable(): that was precisely to avoid the possibility of calling pte_offset_map() on a racily removed or inserted THP entry, but such cases are now safely handled inside it. Likewise there is no need to check pmd_none() or pmd_bad() before calling it.
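[A hedged illustration, not part of any patch in this series: a minimal walk_page_range() pmd_entry callback written to the convention just described. The callback name and the private counter are invented; only the pte_offset_map_lock() / ACTION_AGAIN contract is taken from the patches.]

/*
 * Sketch only: count present ptes, retrying via ACTION_AGAIN when
 * pte_offset_map_lock() fails because *pmd is none, huge or unstable.
 * Assumes <linux/pagewalk.h> and <linux/mm.h>.
 */
static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	unsigned long *nr_present = walk->private;	/* hypothetical counter */
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE)
		if (pte_present(*pte))
			(*nr_present)++;
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}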
Link: https://lkml.kernel.org/r/c77d9d10-3aad-e3ce-4896-99e91c7947f3@google.com Signed-off-by: Hugh Dickins Reviewed-by: SeongJae Park for mm/damon part Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- fs/proc/task_mmu.c | 32 ++++++++++++++++---------------- mm/damon/vaddr.c | 12 ++++++++---- mm/mempolicy.c | 7 ++++--- mm/mincore.c | 9 ++++----- mm/mlock.c | 4 ++++ 5 files changed, 36 insertions(+), 28 deletions(-) (limited to 'mm') diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 6259dd432eeb..0d63b6a0f0d8 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -631,14 +631,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, goto out; } - if (pmd_trans_unstable(pmd)) - goto out; - /* - * The mmap_lock held all the way back in m_start() is what - * keeps khugepaged out of here and from collapsing things - * in here. - */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) smaps_pte_entry(pte, addr, walk); pte_unmap_unlock(pte - 1, ptl); @@ -1191,10 +1188,11 @@ out: return 0; } - if (pmd_trans_unstable(pmd)) - return 0; - pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) { ptent = *pte; @@ -1538,9 +1536,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, spin_unlock(ptl); return err; } - - if (pmd_trans_unstable(pmdp)) - return 0; #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ /* @@ -1548,6 +1543,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, * goes beyond vma->vm_end. 
*/ orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return err; + } for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; @@ -1887,11 +1886,12 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, spin_unlock(ptl); return 0; } - - if (pmd_trans_unstable(pmd)) - return 0; #endif orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } do { struct page *page = can_gather_numa_stats(*pte, vma, addr); if (!page) diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index 37994fb6120c..e814f66dfc2e 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -318,9 +318,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, spin_unlock(ptl); } - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return 0; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } if (!pte_present(*pte)) goto out; damon_ptep_mkold(pte, walk->vma, addr); @@ -464,9 +466,11 @@ huge_out: regular_page: #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - return -EINVAL; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } if (!pte_present(*pte)) goto out; folio = damon_get_folio(pte_pfn(*pte)); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index f06ca8c18e62..0241bb64978b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -514,10 +514,11 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, if (ptl) return queue_folios_pmd(pmd, ptl, addr, end, walk); - if (pmd_trans_unstable(pmd)) - return 0; - mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; diff --git a/mm/mincore.c b/mm/mincore.c index 2d5be013a25a..f33f6a0b1ded 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -113,12 +113,11 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, goto out; } - if (pmd_trans_unstable(pmd)) { - __mincore_unmapped_range(addr, end, vma, vec); - goto out; - } - ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!ptep) { + walk->action = ACTION_AGAIN; + return 0; + } for (; addr != end; ptep++, addr += PAGE_SIZE) { pte_t pte = *ptep; diff --git a/mm/mlock.c b/mm/mlock.c index 40b43f8740df..9f2b1173b1b1 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -329,6 +329,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, } start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!start_pte) { + walk->action = ACTION_AGAIN; + return 0; + } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { if (!pte_present(*pte)) continue; -- cgit v1.2.3 From be872f83bf571f4f9a0ac25e2c9c36e905a36619 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:18:49 -0700 Subject: mm/pagewalk: walk_pte_range() allow for pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit walk_pte_range() has a no_vma option to serve walk_page_range_novma(). I don't know of any problem, but it looks safer to check for init_mm, and use pte_offset_kernel() rather than pte_offset_map() in that case: pte_offset_map()'s pmdval validation is intended for userspace. Allow for its pte_offset_map() or pte_offset_map_lock() to fail, and retry with ACTION_AGAIN if so. 
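[To make the init_mm special case concrete, a hedged sketch follows; the helper name is invented, not from this patch. Kernel page tables are never freed or collapsed under the walker, so init_mm keeps using pte_offset_kernel(), while user page tables go through pte_offset_map(), which validates the pmd and may now return NULL.]

/* Illustrative helper only; not part of the patch below. */
static pte_t *walk_map_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
{
	if (mm == &init_mm)
		return pte_offset_kernel(pmd, addr);	/* cannot fail */
	return pte_offset_map(pmd, addr);	/* NULL if *pmd changed under us */
}

The asymmetry carries over to teardown: only the pte_offset_map() side needs a matching pte_unmap().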
Add a second check for ACTION_AGAIN in walk_pmd_range(), to catch it after return from walk_pte_range(). Remove the pmd_trans_unstable() check after split_huge_pmd() in walk_pmd_range(): walk_pte_range() now handles those cases safely (and they must fail powerpc's is_hugepd() check). Link: https://lkml.kernel.org/r/3eba6f0-2b-fb66-6bb6-2ee8533e221@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/pagewalk.c | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/pagewalk.c b/mm/pagewalk.c index cb23f8a15c13..64437105fe0d 100644 --- a/mm/pagewalk.c +++ b/mm/pagewalk.c @@ -46,15 +46,27 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, spinlock_t *ptl; if (walk->no_vma) { - pte = pte_offset_map(pmd, addr); - err = walk_pte_range_inner(pte, addr, end, walk); - pte_unmap(pte); + /* + * pte_offset_map() might apply user-specific validation. + */ + if (walk->mm == &init_mm) + pte = pte_offset_kernel(pmd, addr); + else + pte = pte_offset_map(pmd, addr); + if (pte) { + err = walk_pte_range_inner(pte, addr, end, walk); + if (walk->mm != &init_mm) + pte_unmap(pte); + } } else { pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); - err = walk_pte_range_inner(pte, addr, end, walk); - pte_unmap_unlock(pte, ptl); + if (pte) { + err = walk_pte_range_inner(pte, addr, end, walk); + pte_unmap_unlock(pte, ptl); + } } - + if (!pte) + walk->action = ACTION_AGAIN; return err; } @@ -141,11 +153,8 @@ again: !(ops->pte_entry)) continue; - if (walk->vma) { + if (walk->vma) split_huge_pmd(walk->vma, pmd, addr); - if (pmd_trans_unstable(pmd)) - goto again; - } if (is_hugepd(__hugepd(pmd_val(*pmd)))) err = walk_hugepd_range((hugepd_t *)pmd, addr, next, walk, PMD_SHIFT); @@ -153,6 +162,10 @@ again: err = walk_pte_range(pmd, addr, next, walk); if (err) break; + + if (walk->action == ACTION_AGAIN) + goto again; + } while (pmd++, addr = next, addr != end); return err; -- cgit v1.2.3 From e5ad581c7f1c32d309ae4e895eea0cd1a3d9f363 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:20:04 -0700 Subject: mm/vmwgfx: simplify pmd & pud mapping dirty helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit wp_clean_pmd_entry() need not check pmd_trans_unstable() or pmd_none(), wp_clean_pud_entry() need not check pud_trans_unstable() or pud_none(): it's just the ACTION_CONTINUE when trans_huge or devmap that's needed to prevent splitting, and we're hoping to remove pmd_trans_unstable(). Is that PUD #ifdef necessary? Maybe some configs are missing a stub. Link: https://lkml.kernel.org/r/d3379c7-65db-26d3-1764-8e866490925f@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/mapping_dirty_helpers.c | 34 +++++++++------------------------- 1 file changed, 9 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c index e1eb33f49059..87b4beeda4fa 100644 --- a/mm/mapping_dirty_helpers.c +++ b/mm/mapping_dirty_helpers.c @@ -128,19 +128,11 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, { pmd_t pmdval = pmdp_get_lockless(pmd); - if (!pmd_trans_unstable(&pmdval)) - return 0; - - if (pmd_none(pmdval)) { - walk->action = ACTION_AGAIN; - return 0; - } - - /* Huge pmd, present or migrated */ - walk->action = ACTION_CONTINUE; - if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) + /* Do not split a huge pmd, present or migrated */ + if (pmd_trans_huge(pmdval) || pmd_devmap(pmdval)) { WARN_ON(pmd_write(pmdval) || pmd_dirty(pmdval)); - + walk->action = ACTION_CONTINUE; + } return 0; } @@ -156,23 +148,15 @@ static int wp_clean_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long end, static int wp_clean_pud_entry(pud_t *pud, unsigned long addr, unsigned long end, struct mm_walk *walk) { +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD pud_t pudval = READ_ONCE(*pud); - if (!pud_trans_unstable(&pudval)) - return 0; - - if (pud_none(pudval)) { - walk->action = ACTION_AGAIN; - return 0; - } - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD - /* Huge pud */ - walk->action = ACTION_CONTINUE; - if (pud_trans_huge(pudval) || pud_devmap(pudval)) + /* Do not split a huge pud */ + if (pud_trans_huge(pudval) || pud_devmap(pudval)) { WARN_ON(pud_write(pudval) || pud_dirty(pudval)); + walk->action = ACTION_CONTINUE; + } #endif - return 0; } -- cgit v1.2.3 From 0d1c81edc61e553ed7a5db18fb8074c8b78e1538 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:21:41 -0700 Subject: mm/vmalloc: vmalloc_to_page() use pte_offset_kernel() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit vmalloc_to_page() was using pte_offset_map() (followed by pte_unmap()), but it's intended for userspace page tables: prefer pte_offset_kernel(). Link: https://lkml.kernel.org/r/696386a-84f8-b33c-82e5-f865ed6eb39@google.com Signed-off-by: Hugh Dickins Reviewed-by: Lorenzo Stoakes Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/vmalloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index d07d0140ebd3..7382e0a60ce1 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -703,11 +703,10 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) if (WARN_ON_ONCE(pmd_bad(*pmd))) return NULL; - ptep = pte_offset_map(pmd, addr); + ptep = pte_offset_kernel(pmd, addr); pte = *ptep; if (pte_present(pte)) page = pte_page(pte); - pte_unmap(ptep); return page; } -- cgit v1.2.3 From 6ec1905f6ec7f9f79ca3eaeaf04584b4dcddd743 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:23:19 -0700 Subject: mm/hmm: retry if pte_offset_map() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit hmm_vma_walk_pmd() is called through mm_walk, but already has a goto again loop of its own, so take part in that if pte_offset_map() fails. Link: https://lkml.kernel.org/r/d6c6dd68-25d4-653b-f94b-a45c53ee04b@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/hmm.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'mm') diff --git a/mm/hmm.c b/mm/hmm.c index e23043345615..b1a9159d7c92 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -381,6 +381,8 @@ again: } ptep = pte_offset_map(pmdp, addr); + if (!ptep) + goto again; for (; addr < end; addr += PAGE_SIZE, ptep++, hmm_pfns++) { int r; -- cgit v1.2.3 From 3622d3cde30898c1b6eafde281c122b994718c58 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:26:04 -0700 Subject: mm/userfaultfd: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mfill_atomic_install_pte() and mfill_atomic_pte_zeropage() treat failed pte_offset_map_lock() as -EAGAIN, which mfill_atomic() already returns to user for a similar race. Link: https://lkml.kernel.org/r/50cf3930-1bfa-4de9-a079-3da47b7ce17b@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/userfaultfd.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'mm') diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index e97a0b4889fc..5fd787158c70 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -76,7 +76,10 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, if (flags & MFILL_ATOMIC_WP) _dst_pte = pte_mkuffd_wp(_dst_pte); + ret = -EAGAIN; dst_pte = pte_offset_map_lock(dst_mm, dst_pmd, dst_addr, &ptl); + if (!dst_pte) + goto out; if (vma_is_shmem(dst_vma)) { /* serialize against truncate with the page table lock */ @@ -121,6 +124,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, ret = 0; out_unlock: pte_unmap_unlock(dst_pte, ptl); +out: return ret; } @@ -212,7 +216,10 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, _dst_pte = pte_mkspecial(pfn_pte(my_zero_pfn(dst_addr), dst_vma->vm_page_prot)); + ret = -EAGAIN; dst_pte = pte_offset_map_lock(dst_vma->vm_mm, dst_pmd, dst_addr, &ptl); + if (!dst_pte) + goto out; if (dst_vma->vm_file) { /* the shmem MAP_PRIVATE case requires checking the i_size */ inode = dst_vma->vm_file->f_inode; @@ -231,6 +238,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, ret = 0; out_unlock: pte_unmap_unlock(dst_pte, ptl); +out: return ret; } -- cgit v1.2.3 From 9f2bad096d2f84751fd4559fcd4cdda1a2af1976 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:27:52 -0700 Subject: mm/debug_vm_pgtable,page_table_check: warn pte map fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Failures here would be surprising: pte_advanced_tests() and pte_clear_tests() and __page_table_check_pte_clear_range() each issue a warning if pte_offset_map() or pte_offset_map_lock() fails. Link: https://lkml.kernel.org/r/3ea9e4f-e5cf-d7d9-4c2-291b3c5a3636@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/debug_vm_pgtable.c | 9 ++++++++- mm/page_table_check.c | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c index c54177aabebd..ee119e33fef1 100644 --- a/mm/debug_vm_pgtable.c +++ b/mm/debug_vm_pgtable.c @@ -138,6 +138,9 @@ static void __init pte_advanced_tests(struct pgtable_debug_args *args) return; pr_debug("Validating PTE advanced\n"); + if (WARN_ON(!args->ptep)) + return; + pte = pfn_pte(args->pte_pfn, args->page_prot); set_pte_at(args->mm, args->vaddr, args->ptep, pte); flush_dcache_page(page); @@ -619,6 +622,9 @@ static void __init pte_clear_tests(struct pgtable_debug_args *args) * the unexpected overhead of cache flushing is acceptable. */ pr_debug("Validating PTE clear\n"); + if (WARN_ON(!args->ptep)) + return; + #ifndef CONFIG_RISCV pte = __pte(pte_val(pte) | RANDOM_ORVALUE); #endif @@ -1377,7 +1383,8 @@ static int __init debug_vm_pgtable(void) args.ptep = pte_offset_map_lock(args.mm, args.pmdp, args.vaddr, &ptl); pte_clear_tests(&args); pte_advanced_tests(&args); - pte_unmap_unlock(args.ptep, ptl); + if (args.ptep) + pte_unmap_unlock(args.ptep, ptl); ptl = pmd_lock(args.mm, args.pmdp); pmd_clear_tests(&args); diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 25d8610c0042..0c511330dbc9 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -240,6 +240,8 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm, pte_t *ptep = pte_offset_map(&pmd, addr); unsigned long i; + if (WARN_ON(!ptep)) + return; for (i = 0; i < PTRS_PER_PTE; i++) { __page_table_check_pte_clear(mm, addr, *ptep); addr += PAGE_SIZE; -- cgit v1.2.3 From 04dee9e85cf50a2f24738e456d66b88de109b806 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:29:22 -0700 Subject: mm/various: give up if pte_offset_map[_lock]() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Following the examples of nearby code, various functions can just give up if pte_offset_map() or pte_offset_map_lock() fails. And there's no need for a preliminary pmd_trans_unstable() or other such check, since such cases are now safely handled inside. Link: https://lkml.kernel.org/r/7b9bd85d-1652-cbf2-159d-f503b45e5b@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/gup.c | 9 ++++++--- mm/ksm.c | 7 ++++--- mm/memcontrol.c | 8 ++++---- mm/memory-failure.c | 8 +++++--- mm/migrate.c | 3 +++ 5 files changed, 22 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index d448fd286b8c..598e8c98367b 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -545,10 +545,10 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, if (WARN_ON_ONCE((flags & (FOLL_PIN | FOLL_GET)) == (FOLL_PIN | FOLL_GET))) return ERR_PTR(-EINVAL); - if (unlikely(pmd_bad(*pmd))) - return no_page_table(vma, flags); ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + return no_page_table(vma, flags); pte = *ptep; if (!pte_present(pte)) goto no_page; @@ -852,8 +852,9 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pmd = pmd_offset(pud, address); if (!pmd_present(*pmd)) return -EFAULT; - VM_BUG_ON(pmd_trans_huge(*pmd)); pte = pte_offset_map(pmd, address); + if (!pte) + return -EFAULT; if (pte_none(*pte)) goto unmap; *vma = get_gate_vma(mm); @@ -2468,6 +2469,8 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, pte_t *ptep, *ptem; ptem = ptep = pte_offset_map(&pmd, addr); + if (!ptep) + return 0; do { pte_t pte = ptep_get_lockless(ptep); struct page *page; diff --git a/mm/ksm.c b/mm/ksm.c index df2aa281d49d..3dc15459dd20 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -431,10 +431,9 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex pte_t *pte; int ret; - if (pmd_leaf(*pmd) || !pmd_present(*pmd)) - return 0; - pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); + if (!pte) + return 0; if (pte_present(*pte)) { page = vm_normal_page(walk->vma, addr, *pte); } else if (!pte_none(*pte)) { @@ -1203,6 +1202,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, mmu_notifier_invalidate_range_start(&range); ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!ptep) + goto out_mn; if (!pte_same(*ptep, orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index caf6ab55f8e3..77d8d2d14fcf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6021,9 +6021,9 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, return 0; } - if (pmd_trans_unstable(pmd)) - return 0; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return 0; for (; addr != end; pte++, addr += PAGE_SIZE) if (get_mctgt_type(vma, addr, *pte, NULL)) mc.precharge++; /* increment precharge temporarily */ @@ -6241,10 +6241,10 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, return 0; } - if (pmd_trans_unstable(pmd)) - return 0; retry: pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return 0; for (; addr != end; addr += PAGE_SIZE) { pte_t ptent = *(pte++); bool device = false; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 004a02f44271..d5116f0eb1b6 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -405,6 +405,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, if (pmd_devmap(*pmd)) return PMD_SHIFT; pte = 
pte_offset_map(pmd, address); + if (!pte) + return 0; if (pte_present(*pte) && pte_devmap(*pte)) ret = PAGE_SHIFT; pte_unmap(pte); @@ -791,11 +793,11 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, goto out; } - if (pmd_trans_unstable(pmdp)) - goto out; - mapped_pte = ptep = pte_offset_map_lock(walk->vma->vm_mm, pmdp, addr, &ptl); + if (!ptep) + goto out; + for (; addr != end; ptep++, addr += PAGE_SIZE) { ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, hwp->pfn, &hwp->tk); diff --git a/mm/migrate.c b/mm/migrate.c index c1f2c40441e1..363562992046 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -305,6 +305,9 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, swp_entry_t entry; ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!ptep) + return; + pte = *ptep; pte_unmap(ptep); -- cgit v1.2.3 From 670ddd8cdcbd1d07a4571266ae3517f821728c3a Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:30:48 -0700 Subject: mm/mprotect: delete pmd_none_or_clear_bad_unless_trans_huge() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit change_pmd_range() had special pmd_none_or_clear_bad_unless_trans_huge(), required to avoid "bad" choices when setting automatic NUMA hinting under mmap_read_lock(); but most of that is already covered in pte_offset_map() now. change_pmd_range() just wants a pmd_none() check before wasting time on MMU notifiers, then checks on the read-once _pmd value to work out what's needed for huge cases. If change_pte_range() returns -EAGAIN to retry if pte_offset_map_lock() fails, nothing more special is needed. Link: https://lkml.kernel.org/r/725a42a9-91e9-c868-925-e3a5fd40bb4f@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/mprotect.c | 74 ++++++++++++++--------------------------------------------- 1 file changed, 17 insertions(+), 57 deletions(-) (limited to 'mm') diff --git a/mm/mprotect.c b/mm/mprotect.c index c5a13c0f1017..64e1df0af514 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -93,22 +93,9 @@ static long change_pte_range(struct mmu_gather *tlb, bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE; tlb_change_page_size(tlb, PAGE_SIZE); - - /* - * Can be called with only the mmap_lock for reading by - * prot_numa so we must check the pmd isn't constantly - * changing from under us from pmd_none to pmd_trans_huge - * and/or the other way around. - */ - if (pmd_trans_unstable(pmd)) - return 0; - - /* - * The pmd points to a regular pte so the pmd can't change - * from under us even if the mmap_lock is only hold for - * reading. 
- */ pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!pte) + return -EAGAIN; /* Get target node for single threaded private VMAs */ if (prot_numa && !(vma->vm_flags & VM_SHARED) && @@ -301,26 +288,6 @@ static long change_pte_range(struct mmu_gather *tlb, return pages; } -/* - * Used when setting automatic NUMA hinting protection where it is - * critical that a numa hinting PMD is not confused with a bad PMD. - */ -static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd) -{ - pmd_t pmdval = pmdp_get_lockless(pmd); - - if (pmd_none(pmdval)) - return 1; - if (pmd_trans_huge(pmdval)) - return 0; - if (unlikely(pmd_bad(pmdval))) { - pmd_clear_bad(pmd); - return 1; - } - - return 0; -} - /* * Return true if we want to split THPs into PTE mappings in change * protection procedure, false otherwise. @@ -398,7 +365,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, pmd = pmd_offset(pud, addr); do { long ret; - + pmd_t _pmd; +again: next = pmd_addr_end(addr, end); ret = change_pmd_prepare(vma, pmd, cp_flags); @@ -406,16 +374,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, pages = ret; break; } - /* - * Automatic NUMA balancing walks the tables with mmap_lock - * held for read. It's possible a parallel update to occur - * between pmd_trans_huge() and a pmd_none_or_clear_bad() - * check leading to a false positive and clearing. - * Hence, it's necessary to atomically read the PMD value - * for all the checks. - */ - if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) && - pmd_none_or_clear_bad_unless_trans_huge(pmd)) + + if (pmd_none(*pmd)) goto next; /* invoke the mmu notifier if the pmd is populated */ @@ -426,7 +386,8 @@ static inline long change_pmd_range(struct mmu_gather *tlb, mmu_notifier_invalidate_range_start(&range); } - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { + _pmd = pmdp_get_lockless(pmd); + if (is_swap_pmd(_pmd) || pmd_trans_huge(_pmd) || pmd_devmap(_pmd)) { if ((next - addr != HPAGE_PMD_SIZE) || pgtable_split_needed(vma, cp_flags)) { __split_huge_pmd(vma, pmd, addr, false, NULL); @@ -441,15 +402,10 @@ static inline long change_pmd_range(struct mmu_gather *tlb, break; } } else { - /* - * change_huge_pmd() does not defer TLB flushes, - * so no need to propagate the tlb argument. - */ - int nr_ptes = change_huge_pmd(tlb, vma, pmd, + ret = change_huge_pmd(tlb, vma, pmd, addr, newprot, cp_flags); - - if (nr_ptes) { - if (nr_ptes == HPAGE_PMD_NR) { + if (ret) { + if (ret == HPAGE_PMD_NR) { pages += HPAGE_PMD_NR; nr_huge_updates++; } @@ -460,8 +416,12 @@ static inline long change_pmd_range(struct mmu_gather *tlb, } /* fall through, the trans huge pmd just split */ } - pages += change_pte_range(tlb, vma, pmd, addr, next, - newprot, cp_flags); + + ret = change_pte_range(tlb, vma, pmd, addr, next, newprot, + cp_flags); + if (ret < 0) + goto again; + pages += ret; next: cond_resched(); } while (pmd++, addr = next, addr != end); -- cgit v1.2.3 From a5be621ee2925b6ee2db455c45c2af2d8a195b0c Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:32:47 -0700 Subject: mm/mremap: retry if either pte_offset_map_*lock() fails MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit move_ptes() return -EAGAIN if pte_offset_map_lock() of old fails, or if pte_offset_map_nolock() of new fails: move_page_tables() retry if so. 
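[As a rough sketch of the locking dance this implies — function and variable names here are invented for illustration, not taken from the patch — the old side is mapped and locked in one step, the new side is mapped with pte_offset_map_nolock() so its lock can be taken in the right order afterwards, and either failure surfaces as -EAGAIN for the caller to retry.]

/* Illustrative only: not the actual move_ptes(). */
static int example_map_both(struct mm_struct *mm,
			    pmd_t *old_pmd, unsigned long old_addr,
			    pmd_t *new_pmd, unsigned long new_addr)
{
	spinlock_t *old_ptl, *new_ptl;
	pte_t *old_pte, *new_pte;

	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	if (!old_pte)
		return -EAGAIN;
	new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl);
	if (!new_pte) {
		pte_unmap_unlock(old_pte, old_ptl);
		return -EAGAIN;
	}
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* ... move the ptes from old to new here ... */

	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte);
	pte_unmap_unlock(old_pte, old_ptl);
	return 0;
}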
But that does need a pmd_none() check inside, to stop endless loop when huge shmem is truncated (thank you to syzbot); and move_huge_pmd() must tolerate that a page table might have been allocated there just before (of course it would be more satisfying to remove the empty page table, but this is not a path worth optimizing). Link: https://lkml.kernel.org/r/65e5e84a-f04-947-23f2-b97d3462e1e@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/huge_memory.c | 5 +++-- mm/mremap.c | 28 ++++++++++++++++++++-------- 2 files changed, 23 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 9ccdb3fe3244..e21b3e3eb994 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1760,9 +1760,10 @@ bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr, /* * The destination pmd shouldn't be established, free_pgtables() - * should have release it. + * should have released it; but move_page_tables() might have already + * inserted a page table, if racing against shmem/file collapse. */ - if (WARN_ON(!pmd_none(*new_pmd))) { + if (!pmd_none(*new_pmd)) { VM_BUG_ON(pmd_trans_huge(*new_pmd)); return false; } diff --git a/mm/mremap.c b/mm/mremap.c index da107f2c71bf..bfc3d1902a94 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -133,7 +133,7 @@ static pte_t move_soft_dirty_pte(pte_t pte) return pte; } -static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, +static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, unsigned long old_addr, unsigned long old_end, struct vm_area_struct *new_vma, pmd_t *new_pmd, unsigned long new_addr, bool need_rmap_locks) @@ -143,6 +143,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, spinlock_t *old_ptl, *new_ptl; bool force_flush = false; unsigned long len = old_end - old_addr; + int err = 0; /* * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma @@ -170,8 +171,16 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, * pte locks because exclusive mmap_lock prevents deadlock. 
*/ old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl); - new_pte = pte_offset_map(new_pmd, new_addr); - new_ptl = pte_lockptr(mm, new_pmd); + if (!old_pte) { + err = -EAGAIN; + goto out; + } + new_pte = pte_offset_map_nolock(mm, new_pmd, new_addr, &new_ptl); + if (!new_pte) { + pte_unmap_unlock(old_pte, old_ptl); + err = -EAGAIN; + goto out; + } if (new_ptl != old_ptl) spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING); flush_tlb_batched_pending(vma->vm_mm); @@ -208,8 +217,10 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, spin_unlock(new_ptl); pte_unmap(new_pte - 1); pte_unmap_unlock(old_pte - 1, old_ptl); +out: if (need_rmap_locks) drop_rmap_locks(vma); + return err; } #ifndef arch_supports_page_table_move @@ -537,6 +548,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma, new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr); if (!new_pmd) break; +again: if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) { if (extent == HPAGE_PMD_SIZE && @@ -544,8 +556,6 @@ unsigned long move_page_tables(struct vm_area_struct *vma, old_pmd, new_pmd, need_rmap_locks)) continue; split_huge_pmd(vma, old_pmd, old_addr); - if (pmd_trans_unstable(old_pmd)) - continue; } else if (IS_ENABLED(CONFIG_HAVE_MOVE_PMD) && extent == PMD_SIZE) { /* @@ -556,11 +566,13 @@ unsigned long move_page_tables(struct vm_area_struct *vma, old_pmd, new_pmd, true)) continue; } - + if (pmd_none(*old_pmd)) + continue; if (pte_alloc(new_vma->vm_mm, new_pmd)) break; - move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma, - new_pmd, new_addr, need_rmap_locks); + if (move_ptes(vma, old_pmd, old_addr, old_addr + extent, + new_vma, new_pmd, new_addr, need_rmap_locks) < 0) + goto again; } mmu_notifier_invalidate_range_end(&range); -- cgit v1.2.3 From f3cd4ab0aabf0c7f25ad438b37954db970174731 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:34:03 -0700 Subject: mm/madvise: clean up pte_offset_map_lock() scans MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Came here to make madvise's several pte_offset_map_lock() scans advance to next extent on failure, and remove superfluous pmd_trans_unstable() and pmd_none_or_trans_huge_or_clear_bad() calls. But also did some nearby cleanup. swapin_walk_pmd_entry(): don't name an address "index"; don't drop the lock after every pte, only when calling out to read_swap_cache_async(). madvise_cold_or_pageout_pte_range() and madvise_free_pte_range(): prefer "start_pte" for pointer, orig_pte usually denotes a saved pte value; leave lazy MMU mode before unlocking; merge the success and failure paths after split_folio(). Link: https://lkml.kernel.org/r/cc4d9a88-9da6-362-50d9-6735c2b125c6@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/madvise.c | 122 +++++++++++++++++++++++++++++++++-------------------------- 1 file changed, 68 insertions(+), 54 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index b5ffbaf616f5..0af64c4a8f82 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -188,37 +188,43 @@ success: #ifdef CONFIG_SWAP static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, - unsigned long end, struct mm_walk *walk) + unsigned long end, struct mm_walk *walk) { struct vm_area_struct *vma = walk->private; - unsigned long index; struct swap_iocb *splug = NULL; + pte_t *ptep = NULL; + spinlock_t *ptl; + unsigned long addr; - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - return 0; - - for (index = start; index != end; index += PAGE_SIZE) { + for (addr = start; addr < end; addr += PAGE_SIZE) { pte_t pte; swp_entry_t entry; struct page *page; - spinlock_t *ptl; - pte_t *ptep; - ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl); - pte = *ptep; - pte_unmap_unlock(ptep, ptl); + if (!ptep++) { + ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!ptep) + break; + } + pte = *ptep; if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); if (unlikely(non_swap_entry(entry))) continue; + pte_unmap_unlock(ptep, ptl); + ptep = NULL; + page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, - vma, index, false, &splug); + vma, addr, false, &splug); if (page) put_page(page); } + + if (ptep) + pte_unmap_unlock(ptep, ptl); swap_read_unplug(splug); cond_resched(); @@ -340,7 +346,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd, bool pageout = private->pageout; struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; - pte_t *orig_pte, *pte, ptent; + pte_t *start_pte, *pte, ptent; spinlock_t *ptl; struct folio *folio = NULL; LIST_HEAD(folio_list); @@ -422,11 +428,11 @@ huge_unlock: } regular_folio: - if (pmd_trans_unstable(pmd)) - return 0; #endif tlb_change_page_size(tlb, PAGE_SIZE); - orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); + if (!start_pte) + return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte++, addr += PAGE_SIZE) { @@ -447,25 +453,28 @@ regular_folio: * are sure it's worth. Split it if we are only owner. 
*/ if (folio_test_large(folio)) { + int err; + if (folio_mapcount(folio) != 1) break; if (pageout_anon_only_filter && !folio_test_anon(folio)) break; - folio_get(folio); - if (!folio_trylock(folio)) { - folio_put(folio); - break; - } - pte_unmap_unlock(orig_pte, ptl); - if (split_folio(folio)) { - folio_unlock(folio); - folio_put(folio); - orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!folio_trylock(folio)) break; - } + folio_get(folio); + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); folio_unlock(folio); folio_put(folio); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (err) + break; + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; @@ -510,8 +519,10 @@ regular_folio: folio_deactivate(folio); } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(orig_pte, ptl); + if (start_pte) { + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + } if (pageout) reclaim_pages(&folio_list); cond_resched(); @@ -612,7 +623,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, struct mm_struct *mm = tlb->mm; struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; - pte_t *orig_pte, *pte, ptent; + pte_t *start_pte, *pte, ptent; struct folio *folio; int nr_swap = 0; unsigned long next; @@ -620,13 +631,12 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, next = pmd_addr_end(addr, end); if (pmd_trans_huge(*pmd)) if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next)) - goto next; - - if (pmd_trans_unstable(pmd)) - return 0; + return 0; tlb_change_page_size(tlb, PAGE_SIZE); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + return 0; flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { @@ -664,23 +674,26 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, * deactivate all pages. 
*/ if (folio_test_large(folio)) { + int err; + if (folio_mapcount(folio) != 1) - goto out; + break; + if (!folio_trylock(folio)) + break; folio_get(folio); - if (!folio_trylock(folio)) { - folio_put(folio); - goto out; - } - pte_unmap_unlock(orig_pte, ptl); - if (split_folio(folio)) { - folio_unlock(folio); - folio_put(folio); - orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - goto out; - } + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + start_pte = NULL; + err = split_folio(folio); folio_unlock(folio); folio_put(folio); - orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (err) + break; + start_pte = pte = + pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!start_pte) + break; + arch_enter_lazy_mmu_mode(); pte--; addr -= PAGE_SIZE; continue; @@ -725,17 +738,18 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, } folio_mark_lazyfree(folio); } -out: + if (nr_swap) { if (current->mm == mm) sync_mm_rss(mm); - add_mm_counter(mm, MM_SWAPENTS, nr_swap); } - arch_leave_lazy_mmu_mode(); - pte_unmap_unlock(orig_pte, ptl); + if (start_pte) { + arch_leave_lazy_mmu_mode(); + pte_unmap_unlock(start_pte, ptl); + } cond_resched(); -next: + return 0; } -- cgit v1.2.3 From 179d3e4f3bfa5947821c1b1bc6aa49a4797b7f21 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:35:14 -0700 Subject: mm/madvise: clean up force_shm_swapin_readahead() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some nearby MADV_WILLNEED cleanup unrelated to pte_offset_map_lock(). shmem_swapin_range() is a better name than force_shm_swapin_readahead(). Fix unimportant off-by-one on end_index. Call the swp_entry_t "entry" rather than "swap": either is okay, but entry is the name used elsewhere in mm/madvise.c. Do not assume GFP_HIGHUSER_MOVABLE: that's right for anon swap, but shmem should take gfp from mapping. Pass the actual vma and address to read_swap_cache_async(), in case a NUMA mempolicy applies. lru_add_drain() at outer level, like madvise_willneed()'s other branch. Link: https://lkml.kernel.org/r/67e18875-ffb3-ec27-346-f350e07bed87@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/madvise.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/madvise.c b/mm/madvise.c index 0af64c4a8f82..9b3c9610052f 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -235,30 +235,34 @@ static const struct mm_walk_ops swapin_walk_ops = { .pmd_entry = swapin_walk_pmd_entry, }; -static void force_shm_swapin_readahead(struct vm_area_struct *vma, +static void shmem_swapin_range(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct address_space *mapping) { XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start)); - pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1); + pgoff_t end_index = linear_page_index(vma, end) - 1; struct page *page; struct swap_iocb *splug = NULL; rcu_read_lock(); xas_for_each(&xas, page, end_index) { - swp_entry_t swap; + unsigned long addr; + swp_entry_t entry; if (!xa_is_value(page)) continue; - swap = radix_to_swp_entry(page); + entry = radix_to_swp_entry(page); /* There might be swapin error entries in shmem mapping. */ - if (non_swap_entry(swap)) + if (non_swap_entry(entry)) continue; + + addr = vma->vm_start + + ((xas.xa_index - vma->vm_pgoff) << PAGE_SHIFT); xas_pause(&xas); rcu_read_unlock(); - page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE, - NULL, 0, false, &splug); + page = read_swap_cache_async(entry, mapping_gfp_mask(mapping), + vma, addr, false, &splug); if (page) put_page(page); @@ -266,8 +270,6 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma, } rcu_read_unlock(); swap_read_unplug(splug); - - lru_add_drain(); /* Push any new pages onto the LRU now */ } #endif /* CONFIG_SWAP */ @@ -291,8 +293,8 @@ static long madvise_willneed(struct vm_area_struct *vma, } if (shmem_mapping(file->f_mapping)) { - force_shm_swapin_readahead(vma, start, end, - file->f_mapping); + shmem_swapin_range(vma, start, end, file->f_mapping); + lru_add_drain(); /* Push any new pages onto the LRU now */ return 0; } #else -- cgit v1.2.3 From d850fa729873787a4030eed5fd875d00eb63946b Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:36:11 -0700 Subject: mm/swapoff: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjust unuse_pte() and unuse_pte_range() to allow pte_offset_map_lock() and pte_offset_map() failure; remove pmd_none_or_trans_huge_or_clear_bad() from unuse_pmd_range() now that pte_offset_map() does all that itself. Link: https://lkml.kernel.org/r/c4d831-13c3-9dfd-70c2-64514ad951fd@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/swapfile.c | 38 ++++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 18 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index df312534e239..74dd4d2337b7 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1757,7 +1757,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, hwposioned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!pte || !pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { ret = 0; goto out; } @@ -1810,7 +1810,8 @@ setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); swap_free(entry); out: - pte_unmap_unlock(pte, ptl); + if (pte) + pte_unmap_unlock(pte, ptl); if (page != swapcache) { unlock_page(page); put_page(page); @@ -1822,17 +1823,22 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, unsigned long end, unsigned int type) { - swp_entry_t entry; - pte_t *pte; + pte_t *pte = NULL; struct swap_info_struct *si; - int ret = 0; si = swap_info[type]; - pte = pte_offset_map(pmd, addr); do { struct folio *folio; unsigned long offset; unsigned char swp_count; + swp_entry_t entry; + int ret; + + if (!pte++) { + pte = pte_offset_map(pmd, addr); + if (!pte) + break; + } if (!is_swap_pte(*pte)) continue; @@ -1843,6 +1849,8 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, offset = swp_offset(entry); pte_unmap(pte); + pte = NULL; + folio = swap_cache_get_folio(entry, vma, addr); if (!folio) { struct page *page; @@ -1861,8 +1869,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (!folio) { swp_count = READ_ONCE(si->swap_map[offset]); if (swp_count == 0 || swp_count == SWAP_MAP_BAD) - goto try_next; - + continue; return -ENOMEM; } @@ -1872,20 +1879,17 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, if (ret < 0) { folio_unlock(folio); folio_put(folio); - goto out; + return ret; } folio_free_swap(folio); folio_unlock(folio); folio_put(folio); -try_next: - pte = pte_offset_map(pmd, addr); - } while (pte++, addr += PAGE_SIZE, addr != end); - pte_unmap(pte - 1); + } while (addr += PAGE_SIZE, addr != end); - ret = 0; -out: - return ret; + if (pte) + pte_unmap(pte); + return 0; } static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, @@ -1900,8 +1904,6 @@ static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, do { cond_resched(); next = pmd_addr_end(addr, end); - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - continue; ret = unuse_pte_range(vma, pmd, addr, next, type); if (ret) return ret; -- cgit v1.2.3 From 52fc048320adf1b1c07a2627461dca9f7d7956ff Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:37:12 -0700 Subject: mm/mglru: allow pte_offset_map_nolock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit MGLRU's walk_pte_range() use the safer pte_offset_map_nolock(), rather than pte_lockptr(), to get the ptl for its trylock. 
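[A hedged sketch of that pattern, stripped of the MGLRU-specific state and with an invented function name: map the pte and learn its lock in one call, then back out quietly if either the mapping or the trylock fails.]

/* Illustrative only: not the actual MGLRU walk_pte_range(). */
static bool example_try_scan(struct mm_struct *mm, pmd_t *pmd, unsigned long start)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_nolock(mm, pmd, start & PMD_MASK, &ptl);
	if (!pte)
		return false;		/* pmd changed: move on to the next extent */
	if (!spin_trylock(ptl)) {
		pte_unmap(pte);
		return false;
	}

	/* ... scan the ptes under ptl here ... */

	pte_unmap_unlock(pte, ptl);
	return true;
}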
Just return false and move on to next extent if it fails, like when the trylock fails. Remove the VM_WARN_ON_ONCE(pmd_leaf) since that will happen, rarely. Link: https://lkml.kernel.org/r/51ece73e-7398-2e4a-2384-56708c87844f@google.com Signed-off-by: Hugh Dickins Acked-by: Yu Zhao Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/vmscan.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index a4e2936f6d35..3f64c8d9f629 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4024,15 +4024,15 @@ static bool walk_pte_range(pmd_t *pmd, unsigned long start, unsigned long end, struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); int old_gen, new_gen = lru_gen_from_seq(walk->max_seq); - VM_WARN_ON_ONCE(pmd_leaf(*pmd)); - - ptl = pte_lockptr(args->mm, pmd); - if (!spin_trylock(ptl)) + pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); + if (!pte) return false; + if (!spin_trylock(ptl)) { + pte_unmap(pte); + return false; + } arch_enter_lazy_mmu_mode(); - - pte = pte_offset_map(pmd, start & PMD_MASK); restart: for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; @@ -4073,10 +4073,8 @@ restart: if (i < PTRS_PER_PTE && get_next_vma(PMD_MASK, PAGE_SIZE, args, &start, &end)) goto restart; - pte_unmap(pte); - arch_leave_lazy_mmu_mode(); - spin_unlock(ptl); + pte_unmap_unlock(pte, ptl); return suitable_to_scan(total, young); } -- cgit v1.2.3 From 4b56069c95d69bfce0c0ffb2531d08216268a972 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:38:17 -0700 Subject: mm/migrate_device: allow pte_offset_map_lock() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit migrate_vma_collect_pmd(): remove the pmd_trans_unstable() handling after splitting huge zero pmd, and the pmd_none() handling after successfully splitting huge page: those are now managed inside pte_offset_map_lock(), and by "goto again" when it fails. But the skip after unsuccessful split_huge_page() must stay: it avoids an endless loop. The skip when pmd_bad()? Remove that: it will be treated as a hole rather than a skip once cleared by pte_offset_map_lock(), but with different timing that would be so anyway; and it's arguably best to leave the pmd_bad() handling centralized there. migrate_vma_insert_page(): remove comment on the old pte_offset_map() and old locking limitations; remove the pmd_trans_unstable() check and just proceed to pte_offset_map_lock(), aborting when it fails (page has been charged to memcg, but as in other cases, it's uncharged when freed). Link: https://lkml.kernel.org/r/1131be62-2e84-da2f-8f45-807b2cbeeec5@google.com Signed-off-by: Hugh Dickins Reviewed-by: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/migrate_device.c | 31 ++++--------------------------- 1 file changed, 4 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/migrate_device.c b/mm/migrate_device.c index d30c9de60b0d..a14af6b12b04 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -83,9 +83,6 @@ again: if (is_huge_zero_page(page)) { spin_unlock(ptl); split_huge_pmd(vma, pmdp, addr); - if (pmd_trans_unstable(pmdp)) - return migrate_vma_collect_skip(start, end, - walk); } else { int ret; @@ -100,16 +97,12 @@ again: if (ret) return migrate_vma_collect_skip(start, end, walk); - if (pmd_none(*pmdp)) - return migrate_vma_collect_hole(start, end, -1, - walk); } } - if (unlikely(pmd_bad(*pmdp))) - return migrate_vma_collect_skip(start, end, walk); - ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); + if (!ptep) + goto again; arch_enter_lazy_mmu_mode(); for (; addr < end; addr += PAGE_SIZE, ptep++) { @@ -595,27 +588,10 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, pmdp = pmd_alloc(mm, pudp, addr); if (!pmdp) goto abort; - if (pmd_trans_huge(*pmdp) || pmd_devmap(*pmdp)) goto abort; - - /* - * Use pte_alloc() instead of pte_alloc_map(). We can't run - * pte_offset_map() on pmds where a huge pmd might be created - * from a different thread. - * - * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when - * parallel threads are excluded by other means. - * - * Here we only have mmap_read_lock(mm). - */ if (pte_alloc(mm, pmdp)) goto abort; - - /* See the comment in pte_alloc_one_map() */ - if (unlikely(pmd_trans_unstable(pmdp))) - goto abort; - if (unlikely(anon_vma_prepare(vma))) goto abort; if (mem_cgroup_charge(page_folio(page), vma->vm_mm, GFP_KERNEL)) @@ -650,7 +626,8 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, } ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); - + if (!ptep) + goto abort; if (check_stable_address_space(mm)) goto unlock_abort; -- cgit v1.2.3 From 2378118bd9da1d96d8569ff675183e2cf8973799 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:39:59 -0700 Subject: mm/gup: remove FOLL_SPLIT_PMD use of pmd_trans_unstable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is now no reason for follow_pmd_mask()'s FOLL_SPLIT_PMD block to distinguish huge_zero_page from a normal THP: follow_page_pte() handles any instability, and here it's a good idea to replace any pmd_none(*pmd) by a page table a.s.a.p, in the huge_zero_page case as for a normal THP; and this removes an unnecessary possibility of -EBUSY failure. (Hmm, couldn't the normal THP case have hit an unstably refaulted THP before? But there are only two, exceptional, users of FOLL_SPLIT_PMD.) Link: https://lkml.kernel.org/r/59fd15dd-4d39-5ec-2043-1d5117f7f85@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/gup.c | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 598e8c98367b..838db6c0bfc2 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -682,21 +682,10 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma, return follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } if (flags & FOLL_SPLIT_PMD) { - int ret; - page = pmd_page(*pmd); - if (is_huge_zero_page(page)) { - spin_unlock(ptl); - ret = 0; - split_huge_pmd(vma, pmd, address); - if (pmd_trans_unstable(pmd)) - ret = -EBUSY; - } else { - spin_unlock(ptl); - split_huge_pmd(vma, pmd, address); - ret = pte_alloc(mm, pmd) ? -ENOMEM : 0; - } - - return ret ? ERR_PTR(ret) : + spin_unlock(ptl); + split_huge_pmd(vma, pmd, address); + /* If pmd was left empty, stuff a page table in there quickly */ + return pte_alloc(mm, pmd) ? ERR_PTR(-ENOMEM) : follow_page_pte(vma, address, pmd, flags, &ctx->pgmap); } page = follow_trans_huge_pmd(vma, address, pmd, flags); -- cgit v1.2.3 From c9c1ee20ee84b1a827437ca6dba2f06fe475d9b1 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:41:31 -0700 Subject: mm/huge_memory: split huge pmd under one pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __split_huge_zero_page_pmd() use a single pte_offset_map() to sweep the extent: it's already under pmd_lock(), so this is no worse for latency; and since it's supposed to have full control of the just-withdrawn page table, here choose to VM_BUG_ON if it were to fail. And please don't increment haddr by PAGE_SIZE, that should remain huge aligned: declare a separate addr (not a bugfix, but it was deceptive). __split_huge_pmd_locked() likewise (but it had declared a separate addr); and change its BUG_ON(!pte_none) to VM_BUG_ON, for consistency with zero (those deposited page tables are sometimes victims of random corruption). Link: https://lkml.kernel.org/r/90cbed7f-90d9-b779-4a46-d2485baf9595@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/huge_memory.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e21b3e3eb994..31bc8fa768e3 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2037,6 +2037,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, struct mm_struct *mm = vma->vm_mm; pgtable_t pgtable; pmd_t _pmd, old_pmd; + unsigned long addr; + pte_t *pte; int i; /* @@ -2052,17 +2054,20 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); - for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) { - pte_t *pte, entry; - entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); + pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte); + for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { + pte_t entry; + + entry = pfn_pte(my_zero_pfn(addr), vma->vm_page_prot); entry = pte_mkspecial(entry); if (pmd_uffd_wp(old_pmd)) entry = pte_mkuffd_wp(entry); - pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte_none(*pte)); - set_pte_at(mm, haddr, pte, entry); - pte_unmap(pte); + set_pte_at(mm, addr, pte, entry); + pte++; } + pte_unmap(pte - 1); smp_wmb(); /* make pte visible before pmd */ pmd_populate(mm, pmd, pgtable); } @@ -2077,6 +2082,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, bool young, write, soft_dirty, pmd_migration = false, uffd_wp = false; bool anon_exclusive = false, dirty = false; unsigned long addr; + pte_t *pte; int i; VM_BUG_ON(haddr & ~HPAGE_PMD_MASK); @@ -2205,8 +2211,10 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); + pte = pte_offset_map(&_pmd, haddr); + VM_BUG_ON(!pte); for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) { - pte_t entry, *pte; + pte_t entry; /* * Note that NUMA hinting access restrictions are not * transferred to avoid any possibility of altering @@ -2249,11 +2257,11 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mkuffd_wp(entry); page_add_anon_rmap(page + i, vma, addr, false); } - pte = pte_offset_map(&_pmd, addr); - BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(*pte)); set_pte_at(mm, addr, pte, entry); - pte_unmap(pte); + pte++; } + pte_unmap(pte - 1); if (!pmd_migration) page_remove_rmap(page, vma, true); -- cgit v1.2.3 From 895f5ee464cc90a5f38163720431c849e93ead97 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:42:40 -0700 Subject: mm/khugepaged: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit __collapse_huge_page_swapin(): don't drop the map after every pte, it only has to be dropped by do_swap_page(); give up if pte_offset_map() fails; trace_mm_collapse_huge_page_swapin() at the end, with result; fix comment on returned result; fix vmf.pgoff, though it's not used. 
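As an aside, the lazy map/remap idiom that this and several neighbouring conversions adopt can be shown in isolation. The fragment below is an illustrative sketch only, not code from the tree; handle_one_pte() is a hypothetical callback that returns true if it unmapped the pte on our behalf.

/*
 * Illustrative sketch only (not from the tree): map the page table once,
 * set the pointer to NULL whenever something else has unmapped it, and
 * remap lazily on the next iteration.  handle_one_pte() is hypothetical.
 */
static void walk_ptes_sketch(struct mm_struct *mm, pmd_t *pmd,
			     unsigned long addr, unsigned long end)
{
	pte_t *pte = NULL;

	do {
		if (!pte++) {
			pte = pte_offset_map(pmd, addr);
			if (!pte)
				break;	/* page table disappeared: give up */
		}
		if (handle_one_pte(mm, addr, pte))
			pte = NULL;	/* callee unmapped it; remap next time */
	} while (addr += PAGE_SIZE, addr != end);

	if (pte)
		pte_unmap(pte);
}
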
collapse_huge_page(): use pte_offset_map_lock() on the _pmd returned from clearing; allow failure, but it should be impossible there. hpage_collapse_scan_pmd() and collapse_pte_mapped_thp() allow for pte_offset_map_lock() failure. Link: https://lkml.kernel.org/r/6513e85-d798-34ec-3762-7c24ffb9329@google.com Signed-off-by: Hugh Dickins Reviewed-by: Yang Shi Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/khugepaged.c | 72 +++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 2d206e62d358..04c22b5309d7 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -991,9 +991,8 @@ static int check_pmd_still_valid(struct mm_struct *mm, * Only done if hpage_collapse_scan_pmd believes it is worthwhile. * * Called and returns without pte mapped or spinlocks held. - * Note that if false is returned, mmap_lock will be released. + * Returns result: if not SCAN_SUCCEED, mmap_lock has been released. */ - static int __collapse_huge_page_swapin(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd, @@ -1002,23 +1001,35 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, int swapped_in = 0; vm_fault_t ret = 0; unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE); + int result; + pte_t *pte = NULL; for (address = haddr; address < end; address += PAGE_SIZE) { struct vm_fault vmf = { .vma = vma, .address = address, - .pgoff = linear_page_index(vma, haddr), + .pgoff = linear_page_index(vma, address), .flags = FAULT_FLAG_ALLOW_RETRY, .pmd = pmd, }; - vmf.pte = pte_offset_map(pmd, address); - vmf.orig_pte = *vmf.pte; - if (!is_swap_pte(vmf.orig_pte)) { - pte_unmap(vmf.pte); - continue; + if (!pte++) { + pte = pte_offset_map(pmd, address); + if (!pte) { + mmap_read_unlock(mm); + result = SCAN_PMD_NULL; + goto out; + } } + + vmf.orig_pte = *pte; + if (!is_swap_pte(vmf.orig_pte)) + continue; + + vmf.pte = pte; ret = do_swap_page(&vmf); + /* Which unmaps pte (after perhaps re-checking the entry) */ + pte = NULL; /* * do_swap_page returns VM_FAULT_RETRY with released mmap_lock. @@ -1027,24 +1038,29 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, * resulting in later failure. 
*/ if (ret & VM_FAULT_RETRY) { - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); /* Likely, but not guaranteed, that page lock failed */ - return SCAN_PAGE_LOCK; + result = SCAN_PAGE_LOCK; + goto out; } if (ret & VM_FAULT_ERROR) { mmap_read_unlock(mm); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0); - return SCAN_FAIL; + result = SCAN_FAIL; + goto out; } swapped_in++; } + if (pte) + pte_unmap(pte); + /* Drain LRU add pagevec to remove extra pin on the swapped in pages */ if (swapped_in) lru_add_drain(); - trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1); - return SCAN_SUCCEED; + result = SCAN_SUCCEED; +out: + trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, result); + return result; } static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm, @@ -1144,9 +1160,6 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, address + HPAGE_PMD_SIZE); mmu_notifier_invalidate_range_start(&range); - pte = pte_offset_map(pmd, address); - pte_ptl = pte_lockptr(mm, pmd); - pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */ /* * This removes any huge TLB entry from the CPU so we won't allow @@ -1161,13 +1174,18 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address, mmu_notifier_invalidate_range_end(&range); tlb_remove_table_sync_one(); - spin_lock(pte_ptl); - result = __collapse_huge_page_isolate(vma, address, pte, cc, - &compound_pagelist); - spin_unlock(pte_ptl); + pte = pte_offset_map_lock(mm, &_pmd, address, &pte_ptl); + if (pte) { + result = __collapse_huge_page_isolate(vma, address, pte, cc, + &compound_pagelist); + spin_unlock(pte_ptl); + } else { + result = SCAN_PMD_NULL; + } if (unlikely(result != SCAN_SUCCEED)) { - pte_unmap(pte); + if (pte) + pte_unmap(pte); spin_lock(pmd_ptl); BUG_ON(!pmd_none(*pmd)); /* @@ -1251,6 +1269,11 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, memset(cc->node_load, 0, sizeof(cc->node_load)); nodes_clear(cc->alloc_nmask); pte = pte_offset_map_lock(mm, pmd, address, &ptl); + if (!pte) { + result = SCAN_PMD_NULL; + goto out; + } + for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { pte_t pteval = *_pte; @@ -1620,8 +1643,10 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, * lockless_pages_from_mm() and the hardware page walker can access page * tables while all the high-level locks are held in write mode. */ - start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); result = SCAN_FAIL; + start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl); + if (!start_pte) + goto drop_immap; /* step 1: check all mapped PTEs are to the right huge page */ for (i = 0, addr = haddr, pte = start_pte; @@ -1695,6 +1720,7 @@ drop_hpage: abort: pte_unmap_unlock(start_pte, ptl); +drop_immap: i_mmap_unlock_write(vma->vm_file->f_mapping); goto drop_hpage; } -- cgit v1.2.3 From 3db82b9374ca921b8b820a75e83809d5c4133d8f Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:43:38 -0700 Subject: mm/memory: allow pte_offset_map[_lock]() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit copy_pte_range(): use pte_offset_map_nolock(), and allow for it to fail; but with a comment on some further assumptions that are being made there. 
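Reduced to a minimal hedged sketch, the calling convention these conversions move to looks like the fragment below; do_pte_work() is an invented placeholder, and -EAGAIN simply stands for "caller should retry or treat this range as a hole".

/*
 * Minimal sketch of the convention after this series (not a real function
 * in the tree): a NULL return from pte_offset_map_lock() means the page
 * table vanished or became huge under us, so back out rather than assume
 * a mapped pte.  do_pte_work() is hypothetical.
 */
static int locked_pte_op_sketch(struct mm_struct *mm, pmd_t *pmd,
				unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte)
		return -EAGAIN;		/* let the caller retry or skip */

	do_pte_work(mm, addr, pte);	/* hypothetical work done under ptl */

	pte_unmap_unlock(pte, ptl);
	return 0;
}
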
zap_pte_range() and zap_pmd_range(): adjust their interaction so that a pte_offset_map_lock() failure in zap_pte_range() leads to a retry in zap_pmd_range(); remove call to pmd_none_or_trans_huge_or_clear_bad(). Allow pte_offset_map_lock() to fail in many functions. Update comment on calling pte_alloc() in do_anonymous_page(). Remove redundant calls to pmd_trans_unstable(), pmd_devmap_trans_unstable(), pmd_none() and pmd_bad(); but leave pmd_none_or_clear_bad() calls in free_pmd_range() and copy_pmd_range(), those do simplify the next level down. Link: https://lkml.kernel.org/r/bb548d50-e99a-f29e-eab1-a43bef2a1287@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/memory.c | 167 +++++++++++++++++++++++++++++------------------------------- 1 file changed, 81 insertions(+), 86 deletions(-) (limited to 'mm') diff --git a/mm/memory.c b/mm/memory.c index 221b21623644..4ab4de234b76 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1012,13 +1012,25 @@ again: progress = 0; init_rss_vec(rss); + /* + * copy_pmd_range()'s prior pmd_none_or_clear_bad(src_pmd), and the + * error handling here, assume that exclusive mmap_lock on dst and src + * protects anon from unexpected THP transitions; with shmem and file + * protected by mmap_lock-less collapse skipping areas with anon_vma + * (whereas vma_needs_copy() skips areas without anon_vma). A rework + * can remove such assumptions later, but this is good enough for now. + */ dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); if (!dst_pte) { ret = -ENOMEM; goto out; } - src_pte = pte_offset_map(src_pmd, addr); - src_ptl = pte_lockptr(src_mm, src_pmd); + src_pte = pte_offset_map_nolock(src_mm, src_pmd, addr, &src_ptl); + if (!src_pte) { + pte_unmap_unlock(dst_pte, dst_ptl); + /* ret == 0 */ + goto out; + } spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); orig_src_pte = src_pte; orig_dst_pte = dst_pte; @@ -1083,8 +1095,7 @@ again: } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); arch_leave_lazy_mmu_mode(); - spin_unlock(src_ptl); - pte_unmap(orig_src_pte); + pte_unmap_unlock(orig_src_pte, src_ptl); add_mm_rss_vec(dst_mm, rss); pte_unmap_unlock(orig_dst_pte, dst_ptl); cond_resched(); @@ -1388,10 +1399,11 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, swp_entry_t entry; tlb_change_page_size(tlb, PAGE_SIZE); -again: init_rss_vec(rss); - start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl); - pte = start_pte; + start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return addr; + flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { @@ -1507,17 +1519,10 @@ again: * If we forced a TLB flush (either due to running out of * batch buffers or because we needed to flush dirty TLB * entries before releasing the ptl), free the batched - * memory too. Restart if we didn't do everything. + * memory too. Come back again if we didn't do everything. 
*/ - if (force_flush) { - force_flush = 0; + if (force_flush) tlb_flush_mmu(tlb); - } - - if (addr != end) { - cond_resched(); - goto again; - } return addr; } @@ -1536,8 +1541,10 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) { if (next - addr != HPAGE_PMD_SIZE) __split_huge_pmd(vma, pmd, addr, false, NULL); - else if (zap_huge_pmd(tlb, vma, pmd, addr)) - goto next; + else if (zap_huge_pmd(tlb, vma, pmd, addr)) { + addr = next; + continue; + } /* fall through */ } else if (details && details->single_folio && folio_test_pmd_mappable(details->single_folio) && @@ -1550,20 +1557,14 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb, */ spin_unlock(ptl); } - - /* - * Here there can be other concurrent MADV_DONTNEED or - * trans huge page faults running, and if the pmd is - * none or trans huge it can change under us. This is - * because MADV_DONTNEED holds the mmap_lock in read - * mode. - */ - if (pmd_none_or_trans_huge_or_clear_bad(pmd)) - goto next; - next = zap_pte_range(tlb, vma, pmd, addr, next, details); -next: - cond_resched(); - } while (pmd++, addr = next, addr != end); + if (pmd_none(*pmd)) { + addr = next; + continue; + } + addr = zap_pte_range(tlb, vma, pmd, addr, next, details); + if (addr != next) + pmd--; + } while (pmd++, cond_resched(), addr != end); return addr; } @@ -1905,6 +1906,10 @@ more: const int batch_size = min_t(int, pages_to_write_in_pmd, 8); start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock); + if (!start_pte) { + ret = -EFAULT; + goto out; + } for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) { int err = insert_page_in_batch_locked(vma, pte, addr, pages[curr_page_idx], prot); @@ -2572,10 +2577,10 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, mapped_pte = pte = (mm == &init_mm) ? pte_offset_kernel(pmd, addr) : pte_offset_map_lock(mm, pmd, addr, &ptl); + if (!pte) + return -EINVAL; } - BUG_ON(pmd_huge(*pmd)); - arch_enter_lazy_mmu_mode(); if (fn) { @@ -2804,7 +2809,6 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, int ret; void *kaddr; void __user *uaddr; - bool locked = false; struct vm_area_struct *vma = vmf->vma; struct mm_struct *mm = vma->vm_mm; unsigned long addr = vmf->address; @@ -2830,12 +2834,12 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * On architectures with software "accessed" bits, we would * take a double page fault, so mark it accessed here. */ + vmf->pte = NULL; if (!arch_has_hw_pte_young() && !pte_young(vmf->orig_pte)) { pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - locked = true; - if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only @@ -2857,13 +2861,12 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * zeroes. 
*/ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) { - if (locked) + if (vmf->pte) goto warn; /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - locked = true; - if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; @@ -2888,7 +2891,7 @@ warn: ret = 0; pte_unlock: - if (locked) + if (vmf->pte) pte_unmap_unlock(vmf->pte, vmf->ptl); kunmap_atomic(kaddr); flush_dcache_page(dst); @@ -3110,7 +3113,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) { + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(&old_folio->page)); @@ -3178,19 +3181,20 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) /* Free the old page.. */ new_folio = old_folio; page_copied = 1; - } else { + pte_unmap_unlock(vmf->pte, vmf->ptl); + } else if (vmf->pte) { update_mmu_tlb(vma, vmf->address, vmf->pte); + pte_unmap_unlock(vmf->pte, vmf->ptl); } - if (new_folio) - folio_put(new_folio); - - pte_unmap_unlock(vmf->pte, vmf->ptl); /* * No need to double call mmu_notifier->invalidate_range() callback as * the above ptep_clear_flush_notify() did already call it. */ mmu_notifier_invalidate_range_only_end(&range); + + if (new_folio) + folio_put(new_folio); if (old_folio) { if (page_copied) free_swap_cache(&old_folio->page); @@ -3230,6 +3234,8 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) WARN_ON_ONCE(!(vmf->vma->vm_flags & VM_SHARED)); vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return VM_FAULT_NOPAGE; /* * We might have raced with another page fault while we released the * pte_offset_map_lock. @@ -3591,10 +3597,11 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); folio_unlock(folio); folio_put(folio); @@ -3625,6 +3632,8 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) { vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return 0; /* * Be careful so that we will only recover a special uffd-wp pte into a * none pte. Otherwise it means the pte could have changed, so retry. 
@@ -3728,7 +3737,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->page = pfn_swap_entry_to_page(entry); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || + !pte_same(*vmf->pte, vmf->orig_pte))) goto unlock; /* @@ -3805,7 +3815,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } @@ -3875,7 +3885,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { @@ -4001,13 +4011,15 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); out: if (si) put_swap_device(si); return ret; out_nomap: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); out_page: folio_unlock(folio); out_release: @@ -4039,22 +4051,12 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) return VM_FAULT_SIGBUS; /* - * Use pte_alloc() instead of pte_alloc_map(). We can't run - * pte_offset_map() on pmds where a huge pmd might be created - * from a different thread. - * - * pte_alloc_map() is safe to use under mmap_write_lock(mm) or when - * parallel threads are excluded by other means. - * - * Here we only have mmap_read_lock(mm). + * Use pte_alloc() instead of pte_alloc_map(), so that OOM can + * be distinguished from a transient failure of pte_offset_map(). */ if (pte_alloc(vma->vm_mm, vmf->pmd)) return VM_FAULT_OOM; - /* See comment in handle_pte_fault() */ - if (unlikely(pmd_trans_unstable(vmf->pmd))) - return 0; - /* Use the zero-page for reads */ if (!(vmf->flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(vma->vm_mm)) { @@ -4062,6 +4064,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vma->vm_page_prot)); vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + goto unlock; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto unlock; @@ -4102,6 +4106,8 @@ static vm_fault_t do_anonymous_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + goto release; if (vmf_pte_changed(vmf)) { update_mmu_tlb(vma, vmf->address, vmf->pte); goto release; @@ -4129,7 +4135,8 @@ setpte: /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, vmf->address, vmf->pte); unlock: - pte_unmap_unlock(vmf->pte, vmf->ptl); + if (vmf->pte) + pte_unmap_unlock(vmf->pte, vmf->ptl); return ret; release: folio_put(folio); @@ -4378,15 +4385,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf) return VM_FAULT_OOM; } - /* - * See comment in handle_pte_fault() for how this scenario happens, we - * need to return NOPAGE so that we drop this page. 
- */ - if (pmd_devmap_trans_unstable(vmf->pmd)) - return VM_FAULT_NOPAGE; - vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); + if (!vmf->pte) + return VM_FAULT_NOPAGE; /* Re-check under ptl */ if (likely(!vmf_pte_changed(vmf))) { @@ -4628,17 +4630,11 @@ static vm_fault_t do_fault(struct vm_fault *vmf) * The VMA was not fully populated on mmap() or missing VM_DONTEXPAND */ if (!vma->vm_ops->fault) { - /* - * If we find a migration pmd entry or a none pmd entry, which - * should never happen, return SIGBUS - */ - if (unlikely(!pmd_present(*vmf->pmd))) + vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) ret = VM_FAULT_SIGBUS; else { - vmf->pte = pte_offset_map_lock(vmf->vma->vm_mm, - vmf->pmd, - vmf->address, - &vmf->ptl); /* * Make sure this is not a temporary clearing of pte * by holding ptl and checking again. A R/M/W update @@ -5427,10 +5423,9 @@ int follow_pte(struct mm_struct *mm, unsigned long address, pmd = pmd_offset(pud, address); VM_BUG_ON(pmd_trans_huge(*pmd)); - if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) - goto out; - ptep = pte_offset_map_lock(mm, pmd, address, ptlp); + if (!ptep) + goto out; if (!pte_present(*ptep)) goto unlock; *ptepp = ptep; -- cgit v1.2.3 From c7ad08804fae5baa7f71c0790038e8259e1066a5 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:45:05 -0700 Subject: mm/memory: handle_pte_fault() use pte_offset_map_nolock() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit handle_pte_fault() use pte_offset_map_nolock() to get the vmf.ptl which corresponds to vmf.pte, instead of pte_lockptr() being used later, when there's a chance that the pmd entry might have changed, perhaps to none, or to a huge pmd, with no split ptlock in its struct page. Remove its pmd_devmap_trans_unstable() call: pte_offset_map_nolock() will handle that case by failing. Update the "morph" comment above, looking forward to when shmem or file collapse to THP may not take mmap_lock for write (or not at all). do_numa_page() use the vmf->ptl from handle_pte_fault() at first, but refresh it when refreshing vmf->pte. do_swap_page()'s pte_unmap_same() (the thing that takes ptl to verify a two-part PAE orig_pte) use the vmf->ptl from handle_pte_fault() too; but do_swap_page() is also used by anon THP's __collapse_huge_page_swapin(), so adjust that to set vmf->ptl by pte_offset_map_nolock(). Link: https://lkml.kernel.org/r/c1107654-3929-60ac-223e-6877cbb86065@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- mm/khugepaged.c | 6 ++++-- mm/memory.c | 38 +++++++++++++------------------------- 2 files changed, 17 insertions(+), 27 deletions(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 04c22b5309d7..d1951ed572f8 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1003,6 +1003,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, unsigned long address, end = haddr + (HPAGE_PMD_NR * PAGE_SIZE); int result; pte_t *pte = NULL; + spinlock_t *ptl; for (address = haddr; address < end; address += PAGE_SIZE) { struct vm_fault vmf = { @@ -1014,7 +1015,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, }; if (!pte++) { - pte = pte_offset_map(pmd, address); + pte = pte_offset_map_nolock(mm, pmd, address, &ptl); if (!pte) { mmap_read_unlock(mm); result = SCAN_PMD_NULL; @@ -1022,11 +1023,12 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, } } - vmf.orig_pte = *pte; + vmf.orig_pte = ptep_get_lockless(pte); if (!is_swap_pte(vmf.orig_pte)) continue; vmf.pte = pte; + vmf.ptl = ptl; ret = do_swap_page(&vmf); /* Which unmaps pte (after perhaps re-checking the entry) */ pte = NULL; diff --git a/mm/memory.c b/mm/memory.c index 4ab4de234b76..11f221953690 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2786,10 +2786,9 @@ static inline int pte_unmap_same(struct vm_fault *vmf) int same = 1; #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { - spinlock_t *ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); - spin_lock(ptl); + spin_lock(vmf->ptl); same = pte_same(*vmf->pte, vmf->orig_pte); - spin_unlock(ptl); + spin_unlock(vmf->ptl); } #endif pte_unmap(vmf->pte); @@ -4697,7 +4696,6 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * validation through pte_unmap_same(). It's of NUMA type but * the pfn may be screwed if the read is non atomic. */ - vmf->ptl = pte_lockptr(vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); @@ -4768,8 +4766,10 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) flags |= TNF_MIGRATED; } else { flags |= TNF_MIGRATE_FAIL; - vmf->pte = pte_offset_map(vmf->pmd, vmf->address); - spin_lock(vmf->ptl); + vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) + goto out; if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; @@ -4898,27 +4898,16 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) vmf->pte = NULL; vmf->flags &= ~FAULT_FLAG_ORIG_PTE_VALID; } else { - /* - * If a huge pmd materialized under us just retry later. Use - * pmd_trans_unstable() via pmd_devmap_trans_unstable() instead - * of pmd_trans_huge() to ensure the pmd didn't become - * pmd_trans_huge under us and then back to pmd_none, as a - * result of MADV_DONTNEED running immediately after a huge pmd - * fault in a different thread of this mm, in turn leading to a - * misleading pmd_trans_huge() retval. 
All we have to ensure is - * that it is a regular pmd that we can walk with - * pte_offset_map() and we can do that through an atomic read - * in C, which is what pmd_trans_unstable() provides. - */ - if (pmd_devmap_trans_unstable(vmf->pmd)) - return 0; /* * A regular pmd is established and it can't morph into a huge - * pmd from under us anymore at this point because we hold the - * mmap_lock read mode and khugepaged takes it in write mode. - * So now it's safe to run pte_offset_map(). + * pmd by anon khugepaged, since that takes mmap_lock in write + * mode; but shmem or file collapse to THP could still morph + * it into a huge pmd: just retry later if so. */ - vmf->pte = pte_offset_map(vmf->pmd, vmf->address); + vmf->pte = pte_offset_map_nolock(vmf->vma->vm_mm, vmf->pmd, + vmf->address, &vmf->ptl); + if (unlikely(!vmf->pte)) + return 0; vmf->orig_pte = ptep_get_lockless(vmf->pte); vmf->flags |= FAULT_FLAG_ORIG_PTE_VALID; @@ -4937,7 +4926,6 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) if (pte_protnone(vmf->orig_pte) && vma_is_accessible(vmf->vma)) return do_numa_page(vmf); - vmf->ptl = pte_lockptr(vmf->vma->vm_mm, vmf->pmd); spin_lock(vmf->ptl); entry = vmf->orig_pte; if (unlikely(!pte_same(*vmf->pte, entry))) { -- cgit v1.2.3 From feda5c393a6c843c7bf1fc49e1381e2d3822b564 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:50:37 -0700 Subject: mm/pgtable: delete pmd_trans_unstable() and friends MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Delete pmd_trans_unstable, pmd_none_or_trans_huge_or_clear_bad() and pmd_devmap_trans_unstable(), all now unused. With mixed feelings, delete all the comments on pmd_trans_unstable(). That was very good documentation of a subtle state, and this series does not even eliminate that state: but rather, normalizes and extends it, asking pte_offset_map[_lock]() callers to anticipate failure, without regard for whether mmap_read_lock() or mmap_write_lock() is held. Retain pud_trans_unstable(), which has one use in __handle_mm_fault(), but delete its equivalent pud_none_or_trans_huge_or_dev_or_clear_bad(). While there, move the default arch_needs_pgtable_deposit() definition up near where pgtable_trans_huge_deposit() and withdraw() are declared. Link: https://lkml.kernel.org/r/5abdab3-3136-b42e-274d-9c6281bfb79@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/pgtable.h | 103 ++++-------------------------------------------- mm/khugepaged.c | 4 -- 2 files changed, 7 insertions(+), 100 deletions(-) (limited to 'mm') diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 3fabbb018557..a1326e61d7ee 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -599,6 +599,10 @@ extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp, extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp); #endif +#ifndef arch_needs_pgtable_deposit +#define arch_needs_pgtable_deposit() (false) +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* * This is an implementation of pmdp_establish() that is only suitable for an @@ -1300,9 +1304,10 @@ static inline int pud_trans_huge(pud_t pud) } #endif -/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */ -static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) +static inline int pud_trans_unstable(pud_t *pud) { +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ + defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pudval = READ_ONCE(*pud); if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) @@ -1311,104 +1316,10 @@ static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud) pud_clear_bad(pud); return 1; } - return 0; -} - -/* See pmd_trans_unstable for discussion. */ -static inline int pud_trans_unstable(pud_t *pud) -{ -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) - return pud_none_or_trans_huge_or_dev_or_clear_bad(pud); -#else - return 0; #endif -} - -#ifndef arch_needs_pgtable_deposit -#define arch_needs_pgtable_deposit() (false) -#endif -/* - * This function is meant to be used by sites walking pagetables with - * the mmap_lock held in read mode to protect against MADV_DONTNEED and - * transhuge page faults. MADV_DONTNEED can convert a transhuge pmd - * into a null pmd and the transhuge page fault can convert a null pmd - * into an hugepmd or into a regular pmd (if the hugepage allocation - * fails). While holding the mmap_lock in read mode the pmd becomes - * stable and stops changing under us only if it's not null and not a - * transhuge pmd. When those races occurs and this function makes a - * difference vs the standard pmd_none_or_clear_bad, the result is - * undefined so behaving like if the pmd was none is safe (because it - * can return none anyway). The compiler level barrier() is critically - * important to compute the two checks atomically on the same pmdval. - * - * For 32bit kernels with a 64bit large pmd_t this automatically takes - * care of reading the pmd atomically to avoid SMP race conditions - * against pmd_populate() when the mmap_lock is hold for reading by the - * caller (a special atomic read not done by "gcc" as in the generic - * version above, is also needed when THP is disabled because the page - * fault can populate the pmd from under us). 
- */ -static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) -{ - pmd_t pmdval = pmdp_get_lockless(pmd); - /* - * !pmd_present() checks for pmd migration entries - * - * The complete check uses is_pmd_migration_entry() in linux/swapops.h - * But using that requires moving current function and pmd_trans_unstable() - * to linux/swapops.h to resolve dependency, which is too much code move. - * - * !pmd_present() is equivalent to is_pmd_migration_entry() currently, - * because !pmd_present() pages can only be under migration not swapped - * out. - * - * pmd_none() is preserved for future condition checks on pmd migration - * entries and not confusing with this function name, although it is - * redundant with !pmd_present(). - */ - if (pmd_none(pmdval) || pmd_trans_huge(pmdval) || - (IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval))) - return 1; - if (unlikely(pmd_bad(pmdval))) { - pmd_clear_bad(pmd); - return 1; - } return 0; } -/* - * This is a noop if Transparent Hugepage Support is not built into - * the kernel. Otherwise it is equivalent to - * pmd_none_or_trans_huge_or_clear_bad(), and shall only be called in - * places that already verified the pmd is not none and they want to - * walk ptes while holding the mmap sem in read mode (write mode don't - * need this). If THP is not enabled, the pmd can't go away under the - * code even if MADV_DONTNEED runs, but if THP is enabled we need to - * run a pmd_trans_unstable before walking the ptes after - * split_huge_pmd returns (because it may have run when the pmd become - * null, but then a page fault can map in a THP and not a regular page). - */ -static inline int pmd_trans_unstable(pmd_t *pmd) -{ -#ifdef CONFIG_TRANSPARENT_HUGEPAGE - return pmd_none_or_trans_huge_or_clear_bad(pmd); -#else - return 0; -#endif -} - -/* - * the ordering of these checks is important for pmds with _page_devmap set. - * if we check pmd_trans_unstable() first we will trip the bad_pmd() check - * inside of pmd_none_or_trans_huge_or_clear_bad(). this will end up correctly - * returning 1 but not before it spams dmesg with the pmd_clear_bad() output. - */ -static inline int pmd_devmap_trans_unstable(pmd_t *pmd) -{ - return pmd_devmap(*pmd) || pmd_trans_unstable(pmd); -} - #ifndef CONFIG_NUMA_BALANCING /* * Technically a PTE can be PROTNONE even when not doing NUMA balancing but diff --git a/mm/khugepaged.c b/mm/khugepaged.c index d1951ed572f8..881669e738c0 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -944,10 +944,6 @@ static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address, return SCAN_SUCCEED; } -/* - * See pmd_trans_unstable() for how the result may change out from - * underneath us, even if we hold mmap_lock in read. - */ static int find_pmd_or_thp_or_none(struct mm_struct *mm, unsigned long address, pmd_t **pmd) -- cgit v1.2.3 From 4f8fcf4ced0b7184149045818dcc2f9e2689b775 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:52:17 -0700 Subject: mm/swap: swap_vma_readahead() do the pte_offset_map() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit swap_vma_readahead() has been proceeding in an unconventional way, its preliminary swap_ra_info() doing the pte_offset_map() and pte_unmap(), then relying on that pte pointer even after the pte_unmap() - in its CONFIG_64BIT case (I think !CONFIG_HIGHPTE was intended; whereas 32-bit copied ptes to stack while they were mapped, but had to limit how many). 
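The rule that replaces this can be put as a small hedged sketch (illustration only, not tree code; the zero-pte fallback is arbitrary):

/*
 * Sketch of the rule this patch enforces: read the entry with
 * ptep_get_lockless() while the page table is still mapped, and never
 * touch the pte pointer again after pte_unmap(), since the unmap may now
 * drop the RCU read lock that keeps the page table alive.
 */
static pte_t read_entry_sketch(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte = pte_offset_map(pmd, addr);
	pte_t entry = __pte(0);		/* arbitrary "none" fallback */

	if (pte) {
		entry = ptep_get_lockless(pte);	/* read while still mapped */
		pte_unmap(pte);
		/* pte must not be dereferenced beyond this point */
	}
	return entry;
}
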
Though it would be difficult to construct a failing testcase, accessing page table after pte_unmap() will become bad practice, even on 64-bit: an rcu_read_unlock() in pte_unmap() will allow page table to be freed. Move relevant definitions from include/linux/swap.h to mm/swap_state.c, nothing else used them. Delete the CONFIG_64BIT distinction and buffer, delete all reference to ptes from swap_ra_info(), use pte_offset_map() repeatedly in swap_vma_readahead(), breaking from the loop if it fails. (Will the repeated "map" and "unmap" show up as a slowdown anywhere? If so, maybe modify __read_swap_cache_async() to do the pte_unmap() only when it does not find the page already in the swapcache.) Use ptep_get_lockless(), mainly for its READ_ONCE(). Correctly advance the address passed down to each call of __read__swap_cache_async(). Link: https://lkml.kernel.org/r/b7c64ab3-9e44-aac0-d2b-c57de578af1c@google.com Signed-off-by: Hugh Dickins Reviewed-by: "Huang, Ying" Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- include/linux/swap.h | 19 ------------------- mm/swap_state.c | 45 ++++++++++++++++++++++++--------------------- 2 files changed, 24 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index b5f6f2916de1..ce7e82cf787f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -337,25 +337,6 @@ struct swap_info_struct { */ }; -#ifdef CONFIG_64BIT -#define SWAP_RA_ORDER_CEILING 5 -#else -/* Avoid stack overflow, because we need to save part of page table */ -#define SWAP_RA_ORDER_CEILING 3 -#define SWAP_RA_PTE_CACHE_SIZE (1 << SWAP_RA_ORDER_CEILING) -#endif - -struct vma_swap_readahead { - unsigned short win; - unsigned short offset; - unsigned short nr_pte; -#ifdef CONFIG_64BIT - pte_t *ptes; -#else - pte_t ptes[SWAP_RA_PTE_CACHE_SIZE]; -#endif -}; - static inline swp_entry_t folio_swap_entry(struct folio *folio) { swp_entry_t entry = { .val = page_private(&folio->page) }; diff --git a/mm/swap_state.c b/mm/swap_state.c index ef32353c18a6..a33c60e0158f 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -709,6 +709,14 @@ void exit_swap_address_space(unsigned int type) swapper_spaces[type] = NULL; } +#define SWAP_RA_ORDER_CEILING 5 + +struct vma_swap_readahead { + unsigned short win; + unsigned short offset; + unsigned short nr_pte; +}; + static void swap_ra_info(struct vm_fault *vmf, struct vma_swap_readahead *ra_info) { @@ -716,11 +724,7 @@ static void swap_ra_info(struct vm_fault *vmf, unsigned long ra_val; unsigned long faddr, pfn, fpfn, lpfn, rpfn; unsigned long start, end; - pte_t *pte, *orig_pte; unsigned int max_win, hits, prev_win, win; -#ifndef CONFIG_64BIT - pte_t *tpte; -#endif max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING); @@ -739,12 +743,9 @@ static void swap_ra_info(struct vm_fault *vmf, max_win, prev_win); atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0)); - if (win == 1) return; - /* Copy the PTEs because 
the page table may be unmapped */ - orig_pte = pte = pte_offset_map(vmf->pmd, faddr); if (fpfn == pfn + 1) { lpfn = fpfn; rpfn = fpfn + win; @@ -764,15 +765,6 @@ static void swap_ra_info(struct vm_fault *vmf, ra_info->nr_pte = end - start; ra_info->offset = fpfn - start; - pte -= ra_info->offset; -#ifdef CONFIG_64BIT - ra_info->ptes = pte; -#else - tpte = ra_info->ptes; - for (pfn = start; pfn != end; pfn++) - *tpte++ = *pte++; -#endif - pte_unmap(orig_pte); } /** @@ -796,7 +788,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, struct swap_iocb *splug = NULL; struct vm_area_struct *vma = vmf->vma; struct page *page; - pte_t *pte, pentry; + pte_t *pte = NULL, pentry; + unsigned long addr; swp_entry_t entry; unsigned int i; bool page_allocated; @@ -808,17 +801,25 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, if (ra_info.win == 1) goto skip; + addr = vmf->address - (ra_info.offset * PAGE_SIZE); + blk_start_plug(&plug); - for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte; - i++, pte++) { - pentry = *pte; + for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) { + if (!pte++) { + pte = pte_offset_map(vmf->pmd, addr); + if (!pte) + break; + } + pentry = ptep_get_lockless(pte); if (!is_swap_pte(pentry)) continue; entry = pte_to_swp_entry(pentry); if (unlikely(non_swap_entry(entry))) continue; + pte_unmap(pte); + pte = NULL; page = __read_swap_cache_async(entry, gfp_mask, vma, - vmf->address, &page_allocated); + addr, &page_allocated); if (!page) continue; if (page_allocated) { @@ -830,6 +831,8 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask, } put_page(page); } + if (pte) + pte_unmap(pte); blk_finish_plug(&plug); swap_read_unplug(splug); lru_add_drain(); -- cgit v1.2.3 From a92cbb82c8d375d47fbaf0e1ad3fd4074a7cb156 Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Thu, 8 Jun 2023 18:53:23 -0700 Subject: perf/core: allow pte_offset_map() to fail MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In rare transient cases, not yet made possible, pte_offset_map() and pte_offet_map_lock() may not find a page table: handle appropriately. [hughd@google.com: __wp_page_copy_user(): don't call update_mmu_tlb() with NULL] Link: https://lkml.kernel.org/r/1a4db221-7872-3594-57ce-42369945ec8d@google.com Link: https://lkml.kernel.org/r/a194441b-63f3-adb6-5964-7ca3171ae7c2@google.com Signed-off-by: Hugh Dickins Cc: Alistair Popple Cc: Anshuman Khandual Cc: Axel Rasmussen Cc: Christophe Leroy Cc: Christoph Hellwig Cc: David Hildenbrand Cc: "Huang, Ying" Cc: Ira Weiny Cc: Jason Gunthorpe Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Mel Gorman Cc: Miaohe Lin Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Minchan Kim Cc: Naoya Horiguchi Cc: Pavel Tatashin Cc: Peter Xu Cc: Peter Zijlstra Cc: Qi Zheng Cc: Ralph Campbell Cc: Ryan Roberts Cc: SeongJae Park Cc: Song Liu Cc: Steven Price Cc: Suren Baghdasaryan Cc: Thomas Hellström Cc: Will Deacon Cc: Yang Shi Cc: Yu Zhao Cc: Zack Rusin Signed-off-by: Andrew Morton --- kernel/events/core.c | 4 ++++ mm/memory.c | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/kernel/events/core.c b/kernel/events/core.c index db016e418931..174be710f3b3 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7490,6 +7490,7 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) return pud_leaf_size(pud); pmdp = pmd_offset_lockless(pudp, pud, addr); +again: pmd = pmdp_get_lockless(pmdp); if (!pmd_present(pmd)) return 0; @@ -7498,6 +7499,9 @@ static u64 perf_get_pgtable_size(struct mm_struct *mm, unsigned long addr) return pmd_leaf_size(pmd); ptep = pte_offset_map(&pmd, addr); + if (!ptep) + goto again; + pte = ptep_get_lockless(ptep); if (pte_present(pte)) size = pte_leaf_size(pte); diff --git a/mm/memory.c b/mm/memory.c index 11f221953690..63c30f58142b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2843,7 +2843,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, * Other thread has already handled the fault * and update local tlb only */ - update_mmu_tlb(vma, addr, vmf->pte); + if (vmf->pte) + update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } @@ -2867,7 +2868,8 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ - update_mmu_tlb(vma, addr, vmf->pte); + if (vmf->pte) + update_mmu_tlb(vma, addr, vmf->pte); ret = -EAGAIN; goto pte_unlock; } -- cgit v1.2.3 From b95826c9aa48b2997b3973b42a8716ba132b920e Mon Sep 17 00:00:00 2001 From: Sidhartha Kumar Date: Mon, 12 Jun 2023 09:34:05 -0700 Subject: mm: remove set_compound_page_dtor() All users can use the folio equivalent so this function can be safely removed. 
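A hedged before/after fragment (the surrounding function is invented) shows the shape of the call-site conversion that makes the removal possible:

/*
 * Illustrative fragment only: convert the call site to operate on the
 * folio rather than casting from struct page.  prep_demo_page() is
 * hypothetical.
 */
static void prep_demo_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	/* was: set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); */
	folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR);
}
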
Link: https://lkml.kernel.org/r/20230612163405.99345-1-sidhartha.kumar@oracle.com Signed-off-by: Sidhartha Kumar Cc: Aneesh Kumar K.V Cc: Matthew Wilcox Cc: Tarun Sahu Signed-off-by: Andrew Morton --- include/linux/mm.h | 10 ---------- mm/huge_memory.c | 2 +- mm/internal.h | 2 +- 3 files changed, 2 insertions(+), 12 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index a08dc8cc48fb..8f40bf17d597 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1223,16 +1223,6 @@ enum compound_dtor_id { }; extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; -static inline void set_compound_page_dtor(struct page *page, - enum compound_dtor_id compound_dtor) -{ - struct folio *folio = (struct folio *)page; - - VM_BUG_ON_PAGE(compound_dtor >= NR_COMPOUND_DTORS, page); - VM_BUG_ON_PAGE(!PageHead(page), page); - folio->_folio_dtor = compound_dtor; -} - static inline void folio_set_compound_dtor(struct folio *folio, enum compound_dtor_id compound_dtor) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 31bc8fa768e3..76f970aa5b4d 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -583,7 +583,7 @@ void prep_transhuge_page(struct page *page) VM_BUG_ON_FOLIO(folio_order(folio) < 2, folio); INIT_LIST_HEAD(&folio->_deferred_list); - set_compound_page_dtor(page, TRANSHUGE_PAGE_DTOR); + folio_set_compound_dtor(folio, TRANSHUGE_PAGE_DTOR); } static inline bool is_transparent_hugepage(struct page *page) diff --git a/mm/internal.h b/mm/internal.h index faf0508d89a5..33b8b8f66af3 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -391,7 +391,7 @@ static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; - set_compound_page_dtor(page, COMPOUND_PAGE_DTOR); + folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); set_compound_order(page, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); -- cgit v1.2.3 From 349d1670008d3dab99a11b015bef51ad3f26fb4f Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Mon, 12 Jun 2023 12:04:20 -0400 Subject: mm/hugetlb: fix pgtable lock on pmd sharing Huge pmd sharing operates on PUD not PMD, huge_pte_lock() is not suitable in this case because it should only work for last level pte changes, while pmd sharing is always one level higher. Meanwhile, here we're locking over the spte pgtable lock which is even not a lock for current mm but someone else's. It seems even racy on operating on the lock, as after put_page() of the spte pgtable page logically the page can be released, so at least the spin_unlock() needs to be done after the put_page(). No report I am aware, I'm not even sure whether it'll just work on taking the spte pmd lock, because while we're holding i_mmap read lock it probably means the vma interval tree is frozen, all pte allocators over this pud entry could always find the specific svma and spte page, so maybe they'll serialize on this spte page lock? Even so, doesn't seem to be expected. It just seems to be an accident of cb900f412154. Fix it with the proper pud lock (which is the mm's page_table_lock). 
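The locking rule being restored, as a minimal hedged sketch (populate step only; refcounting of the shared page table and the i_mmap locking are omitted):

/*
 * Sketch only, not the hugetlb code itself: pud-level population during
 * pmd sharing is serialized by the mm's page_table_lock, not by the
 * pte-level lock that huge_pte_lock() would hand back.
 */
static void share_pud_sketch(struct mm_struct *mm, pud_t *pud,
			     pmd_t *shared_pmd_table)
{
	spin_lock(&mm->page_table_lock);	/* pud level: use the mm lock */
	if (pud_none(*pud))
		pud_populate(mm, pud, shared_pmd_table);
	spin_unlock(&mm->page_table_lock);
}
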
Link: https://lkml.kernel.org/r/20230612160420.809818-1-peterx@redhat.com Fixes: cb900f412154 ("mm, hugetlb: convert hugetlbfs to use split pmd lock") Signed-off-by: Peter Xu Reviewed-by: Mike Kravetz Cc: Naoya Horiguchi Signed-off-by: Andrew Morton --- mm/hugetlb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index de96cd936b12..1d3d8a61b336 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7130,7 +7130,6 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long saddr; pte_t *spte = NULL; pte_t *pte; - spinlock_t *ptl; i_mmap_lock_read(mapping); vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) { @@ -7151,7 +7150,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, if (!spte) goto out; - ptl = huge_pte_lock(hstate_vma(vma), mm, spte); + spin_lock(&mm->page_table_lock); if (pud_none(*pud)) { pud_populate(mm, pud, (pmd_t *)((unsigned long)spte & PAGE_MASK)); @@ -7159,7 +7158,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, } else { put_page(virt_to_page(spte)); } - spin_unlock(ptl); + spin_unlock(&mm->page_table_lock); out: pte = (pte_t *)pmd_alloc(mm, pud, addr); i_mmap_unlock_read(mapping); -- cgit v1.2.3 From 0c474d31a6378f20cbe83f62d4177ebdc099c7fc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:31:47 +0100 Subject: mm/slab: simplify create_kmalloc_cache() args and make it static In the slab variant of kmem_cache_init(), call new_kmalloc_cache() instead of initialising the kmalloc_caches array directly. With this, create_kmalloc_cache() is now only called from new_kmalloc_cache() in the same file, so make it static. In addition, the useroffset argument is always 0 while usersize is the same as size. Remove them. Link: https://lkml.kernel.org/r/20230612153201.554742-4-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. Manjarres Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. Wysocki" Cc: Robin Murphy Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab.c | 6 +----- mm/slab.h | 5 ++--- mm/slab_common.c | 14 ++++++-------- 3 files changed, 9 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/slab.c b/mm/slab.c index bb57f7fdbae1..b7817dcba63e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1240,11 +1240,7 @@ void __init kmem_cache_init(void) * Initialize the caches that provide memory for the kmem_cache_node * structures first. Without this, further allocations will bug. 
*/ - kmalloc_caches[KMALLOC_NORMAL][INDEX_NODE] = create_kmalloc_cache( - kmalloc_info[INDEX_NODE].name[KMALLOC_NORMAL], - kmalloc_info[INDEX_NODE].size, - ARCH_KMALLOC_FLAGS, 0, - kmalloc_info[INDEX_NODE].size); + new_kmalloc_cache(INDEX_NODE, KMALLOC_NORMAL, ARCH_KMALLOC_FLAGS); slab_state = PARTIAL_NODE; setup_kmalloc_cache_index_table(); diff --git a/mm/slab.h b/mm/slab.h index f01ac256a8f5..592590fcddae 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -255,9 +255,8 @@ gfp_t kmalloc_fix_flags(gfp_t flags); /* Functions provided by the slab allocators */ int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); -struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size, - slab_flags_t flags, unsigned int useroffset, - unsigned int usersize); +void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, + slab_flags_t flags); extern void create_boot_cache(struct kmem_cache *, const char *name, unsigned int size, slab_flags_t flags, unsigned int useroffset, unsigned int usersize); diff --git a/mm/slab_common.c b/mm/slab_common.c index 607249785c07..7f069159aee2 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -658,17 +658,16 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, s->refcount = -1; /* Exempt from merging for now */ } -struct kmem_cache *__init create_kmalloc_cache(const char *name, - unsigned int size, slab_flags_t flags, - unsigned int useroffset, unsigned int usersize) +static struct kmem_cache *__init create_kmalloc_cache(const char *name, + unsigned int size, + slab_flags_t flags) { struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); if (!s) panic("Out of memory when creating slab %s\n", name); - create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset, - usersize); + create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size); list_add(&s->list, &slab_caches); s->refcount = 1; return s; @@ -863,7 +862,7 @@ void __init setup_kmalloc_cache_index_table(void) } } -static void __init +void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { @@ -880,8 +879,7 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) kmalloc_caches[type][idx] = create_kmalloc_cache( kmalloc_info[idx].name[type], - kmalloc_info[idx].size, flags, 0, - kmalloc_info[idx].size); + kmalloc_info[idx].size, flags); /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for -- cgit v1.2.3 From 963e84b0f262297466dff440ccfbb868ecb971b3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:31:48 +0100 Subject: mm/slab: limit kmalloc() minimum alignment to dma_get_cache_alignment() Do not create kmalloc() caches which are not aligned to dma_get_cache_alignment(). There is no functional change since for current architectures defining ARCH_DMA_MINALIGN, ARCH_KMALLOC_MINALIGN equals ARCH_DMA_MINALIGN (and dma_get_cache_alignment()). On architectures without a specific ARCH_DMA_MINALIGN, dma_get_cache_alignment() is 1, so no change to the kmalloc() caches. Link: https://lkml.kernel.org/r/20230612153201.554742-5-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. 
Manjarres Cc: Christoph Hellwig Cc: Robin Murphy Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. Wysocki" Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab_common.c | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 7f069159aee2..7c6475847fdf 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -862,9 +863,18 @@ void __init setup_kmalloc_cache_index_table(void) } } +static unsigned int __kmalloc_minalign(void) +{ + return dma_get_cache_alignment(); +} + void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) { + unsigned int minalign = __kmalloc_minalign(); + unsigned int aligned_size = kmalloc_info[idx].size; + int aligned_idx = idx; + if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) { flags |= SLAB_RECLAIM_ACCOUNT; } else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) { @@ -877,9 +887,17 @@ new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags) flags |= SLAB_CACHE_DMA; } - kmalloc_caches[type][idx] = create_kmalloc_cache( - kmalloc_info[idx].name[type], - kmalloc_info[idx].size, flags); + if (minalign > ARCH_KMALLOC_MINALIGN) { + aligned_size = ALIGN(aligned_size, minalign); + aligned_idx = __kmalloc_index(aligned_size, false); + } + + if (!kmalloc_caches[type][aligned_idx]) + kmalloc_caches[type][aligned_idx] = create_kmalloc_cache( + kmalloc_info[aligned_idx].name[type], + aligned_size, flags); + if (idx != aligned_idx) + kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx]; /* * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for -- cgit v1.2.3 From b035f5a6d8521bc77448d1c61db6244d91da3325 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Mon, 12 Jun 2023 16:32:00 +0100 Subject: mm: slab: reduce the kmalloc() minimum alignment if DMA bouncing possible If an architecture opted in to DMA bouncing of unaligned kmalloc() buffers (ARCH_WANT_KMALLOC_DMA_BOUNCE), reduce the minimum kmalloc() cache alignment below cache-line size to ARCH_KMALLOC_MINALIGN. Link: https://lkml.kernel.org/r/20230612153201.554742-17-catalin.marinas@arm.com Signed-off-by: Catalin Marinas Reviewed-by: Vlastimil Babka Tested-by: Isaac J. Manjarres Cc: Christoph Hellwig Cc: Robin Murphy Cc: Alasdair Kergon Cc: Ard Biesheuvel Cc: Arnd Bergmann Cc: Daniel Vetter Cc: Greg Kroah-Hartman Cc: Herbert Xu Cc: Jerry Snitselaar Cc: Joerg Roedel Cc: Jonathan Cameron Cc: Jonathan Cameron Cc: Lars-Peter Clausen Cc: Logan Gunthorpe Cc: Marc Zyngier Cc: Mark Brown Cc: Mike Snitzer Cc: "Rafael J. 
Wysocki" Cc: Saravana Kannan Cc: Will Deacon Signed-off-by: Andrew Morton --- mm/slab_common.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/slab_common.c b/mm/slab_common.c index 7c6475847fdf..43c008165f56 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -865,6 +866,10 @@ void __init setup_kmalloc_cache_index_table(void) static unsigned int __kmalloc_minalign(void) { +#ifdef CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC + if (io_tlb_default_mem.nslabs) + return ARCH_KMALLOC_MINALIGN; +#endif return dma_get_cache_alignment(); } -- cgit v1.2.3 From 426931e7e5d96664616794c7c36efc748a7497e2 Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 12 Jun 2023 16:15:43 +0100 Subject: mm: ptdump should use ptep_get_lockless() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Patch series "Encapsulate PTE contents from non-arch code", v3. A series to improve the encapsulation of pte entries by disallowing non-arch code from directly dereferencing pte_t pointers. This means that by default, the accesses change from a C dereference to a READ_ONCE(). This is technically the correct thing to do since where pgtables are modified by HW (for access/dirty) they are volatile and therefore we should always ensure READ_ONCE() semantics. But more importantly, by always using the helper, it can be overridden by the architecture to fully encapsulate the contents of the pte. Arch code is deliberately not converted, as the arch code knows best. It is intended that arch code (arm64) will override the default with its own implementation that can (e.g.) hide certain bits from the core code, or determine young/dirty status by mixing in state from another source. This patch (of 3): The page table dumper uses walk_page_range_novma() to walk the page tables, which does not lock the PTL before calling the pte_entry() callback. Therefore, the page table dumper's callback must use ptep_get_lockless() rather than ptep_get() to ensure that the pte it reads is not torn or otherwise corrupt when racing with writers. Link: https://lkml.kernel.org/r/20230612151545.3317766-1-ryan.roberts@arm.com Link: https://lkml.kernel.org/r/20230612151545.3317766-2-ryan.roberts@arm.com Signed-off-by: Ryan Roberts Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Alex Williamson Cc: Al Viro Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Christian Brauner Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dave Airlie Cc: Dimitri Sivanich Cc: Dmitry Vyukov Cc: Ian Rogers Cc: Jason Gunthorpe Cc: Jérôme Glisse Cc: Jiri Olsa Cc: Johannes Weiner Cc: Kirill A. 
Shutemov Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Namhyung Kim Cc: Naoya Horiguchi Cc: Oleksandr Tyshchenko Cc: Pavel Tatashin Cc: Roman Gushchin Cc: SeongJae Park Cc: Shakeel Butt Cc: Uladzislau Rezki (Sony) Cc: Vincenzo Frascino Cc: Yu Zhao Cc: kernel test robot Signed-off-by: Andrew Morton --- mm/ptdump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/ptdump.c b/mm/ptdump.c index 8adab455a68b..03c1bdae4a43 100644 --- a/mm/ptdump.c +++ b/mm/ptdump.c @@ -119,7 +119,7 @@ static int ptdump_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { struct ptdump_state *st = walk->private; - pte_t val = ptep_get(pte); + pte_t val = ptep_get_lockless(pte); if (st->effective_prot) st->effective_prot(st, 4, pte_val(val)); -- cgit v1.2.3 From c33c794828f21217f72ce6fc140e0d34e0d56bff Mon Sep 17 00:00:00 2001 From: Ryan Roberts Date: Mon, 12 Jun 2023 16:15:45 +0100 Subject: mm: ptep_get() conversion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert all instances of direct pte_t* dereferencing to instead use ptep_get() helper. This means that by default, the accesses change from a C dereference to a READ_ONCE(). This is technically the correct thing to do since where pgtables are modified by HW (for access/dirty) they are volatile and therefore we should always ensure READ_ONCE() semantics. But more importantly, by always using the helper, it can be overridden by the architecture to fully encapsulate the contents of the pte. Arch code is deliberately not converted, as the arch code knows best. It is intended that arch code (arm64) will override the default with its own implementation that can (e.g.) hide certain bits from the core code, or determine young/dirty status by mixing in state from another source. Conversion was done using Coccinelle: ---- // $ make coccicheck \ // COCCI=ptepget.cocci \ // SPFLAGS="--include-headers" \ // MODE=patch virtual patch @ depends on patch @ pte_t *v; @@ - *v + ptep_get(v) ---- Then reviewed and hand-edited to avoid multiple unnecessary calls to ptep_get(), instead opting to store the result of a single call in a variable, where it is correct to do so. This aims to negate any cost of READ_ONCE() and will benefit arch-overrides that may be more complex. Included is a fix for an issue in an earlier version of this patch that was pointed out by kernel test robot. The issue arose because config MMU=n elides definition of the ptep helper functions, including ptep_get(). HUGETLB_PAGE=n configs still define a simple huge_ptep_clear_flush() for linking purposes, which dereferences the ptep. So when both configs are disabled, this caused a build error because ptep_get() is not defined. Fix by continuing to do a direct dereference when MMU=n. This is safe because for this config the arch code cannot be trying to virtualize the ptes because none of the ptep helpers are defined. 
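For readers unfamiliar with the helper: where an architecture does not override ptep_get(), the generic definition is essentially a READ_ONCE() wrapper, which is what makes the blanket replacement of direct dereferences safe against hardware access/dirty updates racing with the read. A sketch of the default helper and of the conversion pattern produced by the Coccinelle rule above (illustrative only; the authoritative definitions live in include/linux/pgtable.h and the diff below):

	/* generic default; architectures may provide their own version */
	static inline pte_t ptep_get(pte_t *ptep)
	{
		return READ_ONCE(*ptep);	/* single, non-torn read of the entry */
	}

	/* conversion pattern applied throughout core mm code */
	pte_t ptent = ptep_get(pte);		/* was: pte_t ptent = *pte; */

	if (!pte_present(ptent))
		return 0;
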
Link: https://lkml.kernel.org/r/20230612151545.3317766-4-ryan.roberts@arm.com Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202305120142.yXsNEo6H-lkp@intel.com/ Signed-off-by: Ryan Roberts Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Alex Williamson Cc: Al Viro Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Christian Brauner Cc: Christoph Hellwig Cc: Daniel Vetter Cc: Dave Airlie Cc: Dimitri Sivanich Cc: Dmitry Vyukov Cc: Ian Rogers Cc: Jason Gunthorpe Cc: Jérôme Glisse Cc: Jiri Olsa Cc: Johannes Weiner Cc: Kirill A. Shutemov Cc: Lorenzo Stoakes Cc: Mark Rutland Cc: Matthew Wilcox Cc: Miaohe Lin Cc: Michal Hocko Cc: Mike Kravetz Cc: Mike Rapoport (IBM) Cc: Muchun Song Cc: Namhyung Kim Cc: Naoya Horiguchi Cc: Oleksandr Tyshchenko Cc: Pavel Tatashin Cc: Roman Gushchin Cc: SeongJae Park Cc: Shakeel Butt Cc: Uladzislau Rezki (Sony) Cc: Vincenzo Frascino Cc: Yu Zhao Signed-off-by: Andrew Morton --- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 8 +- drivers/misc/sgi-gru/grufault.c | 2 +- drivers/vfio/vfio_iommu_type1.c | 7 +- drivers/xen/privcmd.c | 2 +- fs/proc/task_mmu.c | 33 +++---- fs/userfaultfd.c | 6 +- include/linux/hugetlb.h | 4 + include/linux/mm_inline.h | 2 +- include/linux/pgtable.h | 6 +- kernel/events/uprobes.c | 2 +- mm/damon/ops-common.c | 2 +- mm/damon/paddr.c | 2 +- mm/damon/vaddr.c | 10 ++- mm/filemap.c | 2 +- mm/gup.c | 21 +++-- mm/highmem.c | 12 +-- mm/hmm.c | 2 +- mm/huge_memory.c | 4 +- mm/hugetlb.c | 2 +- mm/hugetlb_vmemmap.c | 6 +- mm/kasan/init.c | 9 +- mm/kasan/shadow.c | 10 +-- mm/khugepaged.c | 22 ++--- mm/ksm.c | 22 ++--- mm/madvise.c | 6 +- mm/mapping_dirty_helpers.c | 4 +- mm/memcontrol.c | 4 +- mm/memory-failure.c | 26 +++--- mm/memory.c | 100 +++++++++++---------- mm/mempolicy.c | 6 +- mm/migrate.c | 14 +-- mm/migrate_device.c | 15 ++-- mm/mincore.c | 2 +- mm/mlock.c | 6 +- mm/mprotect.c | 8 +- mm/mremap.c | 2 +- mm/page_table_check.c | 4 +- mm/page_vma_mapped.c | 27 +++--- mm/pgtable-generic.c | 2 +- mm/rmap.c | 34 ++++--- mm/sparse-vmemmap.c | 8 +- mm/swap_state.c | 8 +- mm/swapfile.c | 20 +++-- mm/userfaultfd.c | 4 +- mm/vmalloc.c | 6 +- mm/vmscan.c | 14 +-- virt/kvm/kvm_main.c | 11 ++- 47 files changed, 301 insertions(+), 228 deletions(-) (limited to 'mm') diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index 56279908ed30..01e271b6ad21 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -1681,7 +1681,9 @@ static int igt_mmap_gpu(void *arg) static int check_present_pte(pte_t *pte, unsigned long addr, void *data) { - if (!pte_present(*pte) || pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (!pte_present(ptent) || pte_none(ptent)) { pr_err("missing PTE:%lx\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; @@ -1692,7 +1694,9 @@ static int check_present_pte(pte_t *pte, unsigned long addr, void *data) static int check_absent_pte(pte_t *pte, unsigned long addr, void *data) { - if (pte_present(*pte) && !pte_none(*pte)) { + pte_t ptent = ptep_get(pte); + + if (pte_present(ptent) && !pte_none(ptent)) { pr_err("present PTE:%lx; expected to be revoked\n", (addr - (unsigned long)data) >> PAGE_SHIFT); return -EINVAL; diff --git a/drivers/misc/sgi-gru/grufault.c b/drivers/misc/sgi-gru/grufault.c index 378cf02a2aa1..629edb6486de 100644 --- a/drivers/misc/sgi-gru/grufault.c +++ b/drivers/misc/sgi-gru/grufault.c @@ -228,7 +228,7 @@ static int 
atomic_pte_lookup(struct vm_area_struct *vma, unsigned long vaddr, goto err; #ifdef CONFIG_X86_64 if (unlikely(pmd_large(*pmdp))) - pte = *(pte_t *) pmdp; + pte = ptep_get((pte_t *)pmdp); else #endif pte = *pte_offset_kernel(pmdp, vaddr); diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c index 306e6f1d1c70..ebe0ad31d0b0 100644 --- a/drivers/vfio/vfio_iommu_type1.c +++ b/drivers/vfio/vfio_iommu_type1.c @@ -514,6 +514,7 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, bool write_fault) { pte_t *ptep; + pte_t pte; spinlock_t *ptl; int ret; @@ -536,10 +537,12 @@ static int follow_fault_pfn(struct vm_area_struct *vma, struct mm_struct *mm, return ret; } - if (write_fault && !pte_write(*ptep)) + pte = ptep_get(ptep); + + if (write_fault && !pte_write(pte)) ret = -EFAULT; else - *pfn = pte_pfn(*ptep); + *pfn = pte_pfn(pte); pte_unmap_unlock(ptep, ptl); return ret; diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c index e2f580e30a86..f447cd37cc4c 100644 --- a/drivers/xen/privcmd.c +++ b/drivers/xen/privcmd.c @@ -949,7 +949,7 @@ static int privcmd_mmap(struct file *file, struct vm_area_struct *vma) */ static int is_mapped_fn(pte_t *pte, unsigned long addr, void *data) { - return pte_none(*pte) ? 0 : -EBUSY; + return pte_none(ptep_get(pte)) ? 0 : -EBUSY; } static int privcmd_vma_range_is_mapped( diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 0d63b6a0f0d8..507cd4e59d07 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -538,13 +538,14 @@ static void smaps_pte_entry(pte_t *pte, unsigned long addr, bool locked = !!(vma->vm_flags & VM_LOCKED); struct page *page = NULL; bool migration = false, young = false, dirty = false; + pte_t ptent = ptep_get(pte); - if (pte_present(*pte)) { - page = vm_normal_page(vma, addr, *pte); - young = pte_young(*pte); - dirty = pte_dirty(*pte); - } else if (is_swap_pte(*pte)) { - swp_entry_t swpent = pte_to_swp_entry(*pte); + if (pte_present(ptent)) { + page = vm_normal_page(vma, addr, ptent); + young = pte_young(ptent); + dirty = pte_dirty(ptent); + } else if (is_swap_pte(ptent)) { + swp_entry_t swpent = pte_to_swp_entry(ptent); if (!non_swap_entry(swpent)) { int mapcount; @@ -732,11 +733,12 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask, struct mem_size_stats *mss = walk->private; struct vm_area_struct *vma = walk->vma; struct page *page = NULL; + pte_t ptent = ptep_get(pte); - if (pte_present(*pte)) { - page = vm_normal_page(vma, addr, *pte); - } else if (is_swap_pte(*pte)) { - swp_entry_t swpent = pte_to_swp_entry(*pte); + if (pte_present(ptent)) { + page = vm_normal_page(vma, addr, ptent); + } else if (is_swap_pte(ptent)) { + swp_entry_t swpent = pte_to_swp_entry(ptent); if (is_pfn_swap_entry(swpent)) page = pfn_swap_entry_to_page(swpent); @@ -1105,7 +1107,7 @@ static inline void clear_soft_dirty(struct vm_area_struct *vma, * Documentation/admin-guide/mm/soft-dirty.rst for full description * of how soft-dirty works. 
*/ - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_present(ptent)) { pte_t old_pte; @@ -1194,7 +1196,7 @@ out: return 0; } for (; addr != end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (cp->type == CLEAR_REFS_SOFT_DIRTY) { clear_soft_dirty(vma, addr, pte); @@ -1550,7 +1552,7 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, for (; addr < end; pte++, addr += PAGE_SIZE) { pagemap_entry_t pme; - pme = pte_to_pagemap_entry(pm, vma, addr, *pte); + pme = pte_to_pagemap_entry(pm, vma, addr, ptep_get(pte)); err = add_to_pagemap(addr, &pme, pm); if (err) break; @@ -1893,10 +1895,11 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr, return 0; } do { - struct page *page = can_gather_numa_stats(*pte, vma, addr); + pte_t ptent = ptep_get(pte); + struct page *page = can_gather_numa_stats(ptent, vma, addr); if (!page) continue; - gather_stats(page, md, pte_dirty(*pte), 1); + gather_stats(page, md, pte_dirty(ptent), 1); } while (pte++, addr += PAGE_SIZE, addr != end); pte_unmap_unlock(orig_pte, ptl); diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index ca83423f8d54..478e2b169c13 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -335,6 +335,7 @@ static inline bool userfaultfd_must_wait(struct userfaultfd_ctx *ctx, pud_t *pud; pmd_t *pmd, _pmd; pte_t *pte; + pte_t ptent; bool ret = true; mmap_assert_locked(mm); @@ -374,9 +375,10 @@ again: * changes under us. PTE markers should be handled the same as none * ptes here. */ - if (pte_none_mostly(*pte)) + ptent = ptep_get(pte); + if (pte_none_mostly(ptent)) ret = true; - if (!pte_write(*pte) && (reason & VM_UFFD_WP)) + if (!pte_write(ptent) && (reason & VM_UFFD_WP)) ret = true; pte_unmap(pte); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 21f942025fec..beb7c63d2871 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -1185,7 +1185,11 @@ static inline void hugetlb_count_sub(long l, struct mm_struct *mm) static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { +#ifdef CONFIG_MMU + return ptep_get(ptep); +#else return *ptep; +#endif } static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h index 0e1d239a882c..08c2bcefcb2b 100644 --- a/include/linux/mm_inline.h +++ b/include/linux/mm_inline.h @@ -555,7 +555,7 @@ pte_install_uffd_wp_if_needed(struct vm_area_struct *vma, unsigned long addr, bool arm_uffd_pte = false; /* The current status of the pte should be "cleared" before calling */ - WARN_ON_ONCE(!pte_none(*pte)); + WARN_ON_ONCE(!pte_none(ptep_get(pte))); /* * NOTE: userfaultfd_wp_unpopulated() doesn't need this whole diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index fc06f6419661..5063b482e34f 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -231,7 +231,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); int r = 1; if (!pte_young(pte)) r = 0; @@ -318,7 +318,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, pte_t *ptep) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); pte_clear(mm, address, ptep); page_table_check_pte_clear(mm, address, pte); return pte; @@ -519,7 +519,7 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, struct mm_struct; static inline void ptep_set_wrprotect(struct 
mm_struct *mm, unsigned long address, pte_t *ptep) { - pte_t old_pte = *ptep; + pte_t old_pte = ptep_get(ptep); set_pte_at(mm, address, ptep, pte_wrprotect(old_pte)); } #endif diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index 607d742caa61..f0ac5b874919 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -192,7 +192,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr, inc_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte))); ptep_clear_flush_notify(vma, addr, pvmw.pte); if (new_page) set_pte_at_notify(mm, addr, pvmw.pte, diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c index d4ab81229136..e940802a15a4 100644 --- a/mm/damon/ops-common.c +++ b/mm/damon/ops-common.c @@ -39,7 +39,7 @@ struct folio *damon_get_folio(unsigned long pfn) void damon_ptep_mkold(pte_t *pte, struct vm_area_struct *vma, unsigned long addr) { - struct folio *folio = damon_get_folio(pte_pfn(*pte)); + struct folio *folio = damon_get_folio(pte_pfn(ptep_get(pte))); if (!folio) return; diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 5b3a3463d078..40801e38fcf0 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -89,7 +89,7 @@ static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma, while (page_vma_mapped_walk(&pvmw)) { addr = pvmw.address; if (pvmw.pte) { - *accessed = pte_young(*pvmw.pte) || + *accessed = pte_young(ptep_get(pvmw.pte)) || !folio_test_idle(folio) || mmu_notifier_test_young(vma->vm_mm, addr); } else { diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c index e814f66dfc2e..2fcc9731528a 100644 --- a/mm/damon/vaddr.c +++ b/mm/damon/vaddr.c @@ -323,7 +323,7 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr, walk->action = ACTION_AGAIN; return 0; } - if (!pte_present(*pte)) + if (!pte_present(ptep_get(pte))) goto out; damon_ptep_mkold(pte, walk->vma, addr); out: @@ -433,6 +433,7 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long next, struct mm_walk *walk) { pte_t *pte; + pte_t ptent; spinlock_t *ptl; struct folio *folio; struct damon_young_walk_private *priv = walk->private; @@ -471,12 +472,13 @@ regular_page: walk->action = ACTION_AGAIN; return 0; } - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) goto out; - folio = damon_get_folio(pte_pfn(*pte)); + folio = damon_get_folio(pte_pfn(ptent)); if (!folio) goto out; - if (pte_young(*pte) || !folio_test_idle(folio) || + if (pte_young(ptent) || !folio_test_idle(folio) || mmu_notifier_test_young(walk->mm, addr)) priv->young = true; *priv->folio_sz = folio_size(folio); diff --git a/mm/filemap.c b/mm/filemap.c index 1893048ec9ff..00933089b8b6 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -3523,7 +3523,7 @@ again: * handled in the specific fault path, and it'll prohibit the * fault-around logic. 
*/ - if (!pte_none(*vmf->pte)) + if (!pte_none(ptep_get(vmf->pte))) goto unlock; /* We're about to handle the fault */ diff --git a/mm/gup.c b/mm/gup.c index 838db6c0bfc2..38986e522d34 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -477,13 +477,14 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, unsigned int flags) { if (flags & FOLL_TOUCH) { - pte_t entry = *pte; + pte_t orig_entry = ptep_get(pte); + pte_t entry = orig_entry; if (flags & FOLL_WRITE) entry = pte_mkdirty(entry); entry = pte_mkyoung(entry); - if (!pte_same(*pte, entry)) { + if (!pte_same(orig_entry, entry)) { set_pte_at(vma->vm_mm, address, pte, entry); update_mmu_cache(vma, address, pte); } @@ -549,7 +550,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma, ptep = pte_offset_map_lock(mm, pmd, address, &ptl); if (!ptep) return no_page_table(vma, flags); - pte = *ptep; + pte = ptep_get(ptep); if (!pte_present(pte)) goto no_page; if (pte_protnone(pte) && !gup_can_follow_protnone(flags)) @@ -821,6 +822,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pud_t *pud; pmd_t *pmd; pte_t *pte; + pte_t entry; int ret = -EFAULT; /* user gate pages are read-only */ @@ -844,16 +846,17 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address, pte = pte_offset_map(pmd, address); if (!pte) return -EFAULT; - if (pte_none(*pte)) + entry = ptep_get(pte); + if (pte_none(entry)) goto unmap; *vma = get_gate_vma(mm); if (!page) goto out; - *page = vm_normal_page(*vma, address, *pte); + *page = vm_normal_page(*vma, address, entry); if (!*page) { - if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(*pte))) + if ((gup_flags & FOLL_DUMP) || !is_zero_pfn(pte_pfn(entry))) goto unmap; - *page = pte_page(*pte); + *page = pte_page(entry); } ret = try_grab_page(*page, gup_flags); if (unlikely(ret)) @@ -2496,7 +2499,7 @@ static int gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, } if (unlikely(pmd_val(pmd) != pmd_val(*pmdp)) || - unlikely(pte_val(pte) != pte_val(*ptep))) { + unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, 1, flags); goto pte_unmap; } @@ -2693,7 +2696,7 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, if (!folio) return 0; - if (unlikely(pte_val(pte) != pte_val(*ptep))) { + if (unlikely(pte_val(pte) != pte_val(ptep_get(ptep)))) { gup_put_folio(folio, refs, flags); return 0; } diff --git a/mm/highmem.c b/mm/highmem.c index db251e77f98f..e19269093a93 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -161,7 +161,7 @@ struct page *__kmap_to_page(void *vaddr) /* kmap() mappings */ if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP))) - return pte_page(pkmap_page_table[PKMAP_NR(addr)]); + return pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(addr)])); /* kmap_local_page() mappings */ if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) && @@ -191,6 +191,7 @@ static void flush_all_zero_pkmaps(void) for (i = 0; i < LAST_PKMAP; i++) { struct page *page; + pte_t ptent; /* * zero means we don't have anything to do, @@ -203,7 +204,8 @@ static void flush_all_zero_pkmaps(void) pkmap_count[i] = 0; /* sanity check */ - BUG_ON(pte_none(pkmap_page_table[i])); + ptent = ptep_get(&pkmap_page_table[i]); + BUG_ON(pte_none(ptent)); /* * Don't need an atomic fetch-and-clear op here; @@ -212,7 +214,7 @@ static void flush_all_zero_pkmaps(void) * getting the kmap_lock (which is held here). * So no dangers, even with speculative execution. 
*/ - page = pte_page(pkmap_page_table[i]); + page = pte_page(ptent); pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]); set_page_address(page, NULL); @@ -511,7 +513,7 @@ static inline bool kmap_high_unmap_local(unsigned long vaddr) { #ifdef ARCH_NEEDS_KMAP_HIGH_GET if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); + kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)]))); return true; } #endif @@ -548,7 +550,7 @@ void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot) idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); kmap_pte = kmap_get_pte(vaddr, idx); - BUG_ON(!pte_none(*kmap_pte)); + BUG_ON(!pte_none(ptep_get(kmap_pte))); pteval = pfn_pte(pfn, prot); arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval); arch_kmap_local_post_map(vaddr, pteval); diff --git a/mm/hmm.c b/mm/hmm.c index b1a9159d7c92..855e25e59d8f 100644 --- a/mm/hmm.c +++ b/mm/hmm.c @@ -228,7 +228,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr, struct hmm_range *range = hmm_vma_walk->range; unsigned int required_fault; unsigned long cpu_flags; - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); uint64_t pfn_req_flags = *hmm_pfn; if (pte_none_mostly(pte)) { diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 76f970aa5b4d..e94fe292f30a 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2063,7 +2063,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, entry = pte_mkspecial(entry); if (pmd_uffd_wp(old_pmd)) entry = pte_mkuffd_wp(entry); - VM_BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } @@ -2257,7 +2257,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd, entry = pte_mkuffd_wp(entry); page_add_anon_rmap(page + i, vma, addr, false); } - VM_BUG_ON(!pte_none(*pte)); + VM_BUG_ON(!pte_none(ptep_get(pte))); set_pte_at(mm, addr, pte, entry); pte++; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 1d3d8a61b336..d76574425da3 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -7246,7 +7246,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, pte = (pte_t *)pmd_alloc(mm, pud, addr); } } - BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte)); + BUG_ON(pte && pte_present(ptep_get(pte)) && !pte_huge(ptep_get(pte))); return pte; } diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c index f42079b73f82..c2007ef5e9b0 100644 --- a/mm/hugetlb_vmemmap.c +++ b/mm/hugetlb_vmemmap.c @@ -105,7 +105,7 @@ static void vmemmap_pte_range(pmd_t *pmd, unsigned long addr, * remapping (which is calling @walk->remap_pte). */ if (!walk->reuse_page) { - walk->reuse_page = pte_page(*pte); + walk->reuse_page = pte_page(ptep_get(pte)); /* * Because the reuse address is part of the range that we are * walking, skip the reuse address range. @@ -239,7 +239,7 @@ static void vmemmap_remap_pte(pte_t *pte, unsigned long addr, * to the tail pages. 
*/ pgprot_t pgprot = PAGE_KERNEL_RO; - struct page *page = pte_page(*pte); + struct page *page = pte_page(ptep_get(pte)); pte_t entry; /* Remapping the head page requires r/w */ @@ -286,7 +286,7 @@ static void vmemmap_restore_pte(pte_t *pte, unsigned long addr, struct page *page; void *to; - BUG_ON(pte_page(*pte) != walk->reuse_page); + BUG_ON(pte_page(ptep_get(pte)) != walk->reuse_page); page = list_first_entry(walk->vmemmap_pages, struct page, lru); list_del(&page->lru); diff --git a/mm/kasan/init.c b/mm/kasan/init.c index cc64ed6858c6..dcfec277e839 100644 --- a/mm/kasan/init.c +++ b/mm/kasan/init.c @@ -286,7 +286,7 @@ static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd) for (i = 0; i < PTRS_PER_PTE; i++) { pte = pte_start + i; - if (!pte_none(*pte)) + if (!pte_none(ptep_get(pte))) return; } @@ -343,16 +343,19 @@ static void kasan_remove_pte_table(pte_t *pte, unsigned long addr, unsigned long end) { unsigned long next; + pte_t ptent; for (; addr < end; addr = next, pte++) { next = (addr + PAGE_SIZE) & PAGE_MASK; if (next > end) next = end; - if (!pte_present(*pte)) + ptent = ptep_get(pte); + + if (!pte_present(ptent)) continue; - if (WARN_ON(!kasan_early_shadow_page_entry(*pte))) + if (WARN_ON(!kasan_early_shadow_page_entry(ptent))) continue; pte_clear(&init_mm, addr, pte); } diff --git a/mm/kasan/shadow.c b/mm/kasan/shadow.c index 3e62728ae25d..dd772f9d0f08 100644 --- a/mm/kasan/shadow.c +++ b/mm/kasan/shadow.c @@ -226,7 +226,7 @@ static bool shadow_mapped(unsigned long addr) if (pmd_bad(*pmd)) return true; pte = pte_offset_kernel(pmd, addr); - return !pte_none(*pte); + return !pte_none(ptep_get(pte)); } static int __meminit kasan_mem_notifier(struct notifier_block *nb, @@ -317,7 +317,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, unsigned long page; pte_t pte; - if (likely(!pte_none(*ptep))) + if (likely(!pte_none(ptep_get(ptep)))) return 0; page = __get_free_page(GFP_KERNEL); @@ -328,7 +328,7 @@ static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr, pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL); spin_lock(&init_mm.page_table_lock); - if (likely(pte_none(*ptep))) { + if (likely(pte_none(ptep_get(ptep)))) { set_pte_at(&init_mm, addr, ptep, pte); page = 0; } @@ -418,11 +418,11 @@ static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr, { unsigned long page; - page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT); + page = (unsigned long)__va(pte_pfn(ptep_get(ptep)) << PAGE_SHIFT); spin_lock(&init_mm.page_table_lock); - if (likely(!pte_none(*ptep))) { + if (likely(!pte_none(ptep_get(ptep)))) { pte_clear(&init_mm, addr, ptep); free_page(page); } diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 881669e738c0..0b4f00712895 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -511,7 +511,7 @@ static void release_pte_pages(pte_t *pte, pte_t *_pte, struct folio *folio, *tmp; while (--_pte >= pte) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); unsigned long pfn; if (pte_none(pteval)) @@ -555,7 +555,7 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma, for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); if (pte_none(pteval) || (pte_present(pteval) && is_zero_pfn(pte_pfn(pteval)))) { ++none_or_zero; @@ -699,7 +699,7 @@ static void __collapse_huge_page_copy_succeeded(pte_t *pte, for (_pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, address += PAGE_SIZE) { - pteval = *_pte; + pteval = ptep_get(_pte); if 
(pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1); if (is_zero_pfn(pte_pfn(pteval))) { @@ -797,7 +797,7 @@ static int __collapse_huge_page_copy(pte_t *pte, */ for (_pte = pte, _address = address; _pte < pte + HPAGE_PMD_NR; _pte++, page++, _address += PAGE_SIZE) { - pteval = *_pte; + pteval = ptep_get(_pte); if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) { clear_user_highpage(page, _address); continue; @@ -1274,7 +1274,7 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm, for (_address = address, _pte = pte; _pte < pte + HPAGE_PMD_NR; _pte++, _address += PAGE_SIZE) { - pte_t pteval = *_pte; + pte_t pteval = ptep_get(_pte); if (is_swap_pte(pteval)) { ++unmapped; if (!cc->is_khugepaged || @@ -1650,18 +1650,19 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; + pte_t ptent = ptep_get(pte); /* empty pte, skip */ - if (pte_none(*pte)) + if (pte_none(ptent)) continue; /* page swapped out, abort */ - if (!pte_present(*pte)) { + if (!pte_present(ptent)) { result = SCAN_PTE_NON_PRESENT; goto abort; } - page = vm_normal_page(vma, addr, *pte); + page = vm_normal_page(vma, addr, ptent); if (WARN_ON_ONCE(page && is_zone_device_page(page))) page = NULL; /* @@ -1677,10 +1678,11 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, for (i = 0, addr = haddr, pte = start_pte; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) { struct page *page; + pte_t ptent = ptep_get(pte); - if (pte_none(*pte)) + if (pte_none(ptent)) continue; - page = vm_normal_page(vma, addr, *pte); + page = vm_normal_page(vma, addr, ptent); if (WARN_ON_ONCE(page && is_zone_device_page(page))) goto abort; page_remove_rmap(page, vma, false); diff --git a/mm/ksm.c b/mm/ksm.c index 3dc15459dd20..d995779dc1fe 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -429,15 +429,17 @@ static int break_ksm_pmd_entry(pmd_t *pmd, unsigned long addr, unsigned long nex struct page *page = NULL; spinlock_t *ptl; pte_t *pte; + pte_t ptent; int ret; pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl); if (!pte) return 0; - if (pte_present(*pte)) { - page = vm_normal_page(walk->vma, addr, *pte); - } else if (!pte_none(*pte)) { - swp_entry_t entry = pte_to_swp_entry(*pte); + ptent = ptep_get(pte); + if (pte_present(ptent)) { + page = vm_normal_page(walk->vma, addr, ptent); + } else if (!pte_none(ptent)) { + swp_entry_t entry = pte_to_swp_entry(ptent); /* * As KSM pages remain KSM pages until freed, no need to wait @@ -1085,6 +1087,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, int err = -EFAULT; struct mmu_notifier_range range; bool anon_exclusive; + pte_t entry; pvmw.address = page_address_in_vma(page, vma); if (pvmw.address == -EFAULT) @@ -1102,10 +1105,9 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, goto out_unlock; anon_exclusive = PageAnonExclusive(page); - if (pte_write(*pvmw.pte) || pte_dirty(*pvmw.pte) || + entry = ptep_get(pvmw.pte); + if (pte_write(entry) || pte_dirty(entry) || anon_exclusive || mm_tlb_flush_pending(mm)) { - pte_t entry; - swapped = PageSwapCache(page); flush_cache_page(vma, pvmw.address, page_to_pfn(page)); /* @@ -1147,7 +1149,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page, set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry); } - *orig_pte = *pvmw.pte; + *orig_pte = entry; err = 0; out_unlock: @@ -1204,7 +1206,7 @@ static int 
replace_page(struct vm_area_struct *vma, struct page *page, ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); if (!ptep) goto out_mn; - if (!pte_same(*ptep, orig_pte)) { + if (!pte_same(ptep_get(ptep), orig_pte)) { pte_unmap_unlock(ptep, ptl); goto out_mn; } @@ -1231,7 +1233,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page, dec_mm_counter(mm, MM_ANONPAGES); } - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(ptep_get(ptep))); /* * No need to notify as we are replacing a read only page with another * read only page with the same content. diff --git a/mm/madvise.c b/mm/madvise.c index 9b3c9610052f..886f06066622 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -207,7 +207,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start, break; } - pte = *ptep; + pte = ptep_get(ptep); if (!is_swap_pte(pte)) continue; entry = pte_to_swp_entry(pte); @@ -438,7 +438,7 @@ regular_folio: flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr < end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (pte_none(ptent)) continue; @@ -642,7 +642,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr, flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); for (; addr != end; pte++, addr += PAGE_SIZE) { - ptent = *pte; + ptent = ptep_get(pte); if (pte_none(ptent)) continue; diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c index 87b4beeda4fa..a26dd8bcfcdb 100644 --- a/mm/mapping_dirty_helpers.c +++ b/mm/mapping_dirty_helpers.c @@ -35,7 +35,7 @@ static int wp_pte(pte_t *pte, unsigned long addr, unsigned long end, struct mm_walk *walk) { struct wp_walk *wpwalk = walk->private; - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_write(ptent)) { pte_t old_pte = ptep_modify_prot_start(walk->vma, addr, pte); @@ -91,7 +91,7 @@ static int clean_record_pte(pte_t *pte, unsigned long addr, { struct wp_walk *wpwalk = walk->private; struct clean_walk *cwalk = to_clean_walk(wpwalk); - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); if (pte_dirty(ptent)) { pgoff_t pgoff = ((addr - walk->vma->vm_start) >> PAGE_SHIFT) + diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 77d8d2d14fcf..93056918e956 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6025,7 +6025,7 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd, if (!pte) return 0; for (; addr != end; pte++, addr += PAGE_SIZE) - if (get_mctgt_type(vma, addr, *pte, NULL)) + if (get_mctgt_type(vma, addr, ptep_get(pte), NULL)) mc.precharge++; /* increment precharge temporarily */ pte_unmap_unlock(pte - 1, ptl); cond_resched(); @@ -6246,7 +6246,7 @@ retry: if (!pte) return 0; for (; addr != end; addr += PAGE_SIZE) { - pte_t ptent = *(pte++); + pte_t ptent = ptep_get(pte++); bool device = false; swp_entry_t ent; diff --git a/mm/memory-failure.c b/mm/memory-failure.c index d5116f0eb1b6..e245191e6b04 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -6,16 +6,16 @@ * High level machine check handler. Handles pages reported by the * hardware as being corrupted usually due to a multi-bit ECC memory or cache * failure. - * + * * In addition there is a "soft offline" entry point that allows stop using * not-yet-corrupted-by-suspicious pages without killing anything. * * Handles page cache pages in various states. The tricky part - * here is that we can access any page asynchronously in respect to - * other VM users, because memory failures could happen anytime and - * anywhere. 
This could violate some of their assumptions. This is why - * this code has to be extremely careful. Generally it tries to use - * normal locking rules, as in get the standard locks, even if that means + * here is that we can access any page asynchronously in respect to + * other VM users, because memory failures could happen anytime and + * anywhere. This could violate some of their assumptions. This is why + * this code has to be extremely careful. Generally it tries to use + * normal locking rules, as in get the standard locks, even if that means * the error handling takes potentially a long time. * * It can be very tempting to add handling for obscure cases here. @@ -25,12 +25,12 @@ * https://git.kernel.org/cgit/utils/cpu/mce/mce-test.git/ * - The case actually shows up as a frequent (top 10) page state in * tools/mm/page-types when running a real workload. - * + * * There are several operations here with exponential complexity because - * of unsuitable VM data structures. For example the operation to map back - * from RMAP chains to processes has to walk the complete process list and + * of unsuitable VM data structures. For example the operation to map back + * from RMAP chains to processes has to walk the complete process list and * has non linear complexity with the number. But since memory corruptions - * are rare we hope to get away with this. This avoids impacting the core + * are rare we hope to get away with this. This avoids impacting the core * VM. */ @@ -386,6 +386,7 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pud_t *pud; pmd_t *pmd; pte_t *pte; + pte_t ptent; VM_BUG_ON_VMA(address == -EFAULT, vma); pgd = pgd_offset(vma->vm_mm, address); @@ -407,7 +408,8 @@ static unsigned long dev_pagemap_mapping_shift(struct vm_area_struct *vma, pte = pte_offset_map(pmd, address); if (!pte) return 0; - if (pte_present(*pte) && pte_devmap(*pte)) + ptent = ptep_get(pte); + if (pte_present(ptent) && pte_devmap(ptent)) ret = PAGE_SHIFT; pte_unmap(pte); return ret; @@ -799,7 +801,7 @@ static int hwpoison_pte_range(pmd_t *pmdp, unsigned long addr, goto out; for (; addr != end; ptep++, addr += PAGE_SIZE) { - ret = check_hwpoisoned_entry(*ptep, addr, PAGE_SHIFT, + ret = check_hwpoisoned_entry(ptep_get(ptep), addr, PAGE_SHIFT, hwp->pfn, &hwp->tk); if (ret == 1) break; diff --git a/mm/memory.c b/mm/memory.c index 63c30f58142b..3d78b552866d 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -699,15 +699,17 @@ static void restore_exclusive_pte(struct vm_area_struct *vma, struct page *page, unsigned long address, pte_t *ptep) { + pte_t orig_pte; pte_t pte; swp_entry_t entry; + orig_pte = ptep_get(ptep); pte = pte_mkold(mk_pte(page, READ_ONCE(vma->vm_page_prot))); - if (pte_swp_soft_dirty(*ptep)) + if (pte_swp_soft_dirty(orig_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(*ptep); - if (pte_swp_uffd_wp(*ptep)) + entry = pte_to_swp_entry(orig_pte); + if (pte_swp_uffd_wp(orig_pte)) pte = pte_mkuffd_wp(pte); else if (is_writable_device_exclusive_entry(entry)) pte = maybe_mkwrite(pte_mkdirty(pte), vma); @@ -744,7 +746,7 @@ static int try_restore_exclusive_pte(pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr) { - swp_entry_t entry = pte_to_swp_entry(*src_pte); + swp_entry_t entry = pte_to_swp_entry(ptep_get(src_pte)); struct page *page = pfn_swap_entry_to_page(entry); if (trylock_page(page)) { @@ -768,9 +770,10 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, struct vm_area_struct *src_vma, unsigned long addr, int *rss) { 
unsigned long vm_flags = dst_vma->vm_flags; - pte_t pte = *src_pte; + pte_t orig_pte = ptep_get(src_pte); + pte_t pte = orig_pte; struct page *page; - swp_entry_t entry = pte_to_swp_entry(pte); + swp_entry_t entry = pte_to_swp_entry(orig_pte); if (likely(!non_swap_entry(entry))) { if (swap_duplicate(entry) < 0) @@ -785,8 +788,8 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, spin_unlock(&mmlist_lock); } /* Mark the swap entry as shared. */ - if (pte_swp_exclusive(*src_pte)) { - pte = pte_swp_clear_exclusive(*src_pte); + if (pte_swp_exclusive(orig_pte)) { + pte = pte_swp_clear_exclusive(orig_pte); set_pte_at(src_mm, addr, src_pte, pte); } rss[MM_SWAPENTS]++; @@ -805,9 +808,9 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, entry = make_readable_migration_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*src_pte)) + if (pte_swp_soft_dirty(orig_pte)) pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*src_pte)) + if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } @@ -840,7 +843,7 @@ copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, entry = make_readable_device_private_entry( swp_offset(entry)); pte = swp_entry_to_pte(entry); - if (pte_swp_uffd_wp(*src_pte)) + if (pte_swp_uffd_wp(orig_pte)) pte = pte_swp_mkuffd_wp(pte); set_pte_at(src_mm, addr, src_pte, pte); } @@ -904,7 +907,7 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma /* All done, just insert the new page copy in the child */ pte = mk_pte(&new_folio->page, dst_vma->vm_page_prot); pte = maybe_mkwrite(pte_mkdirty(pte), dst_vma); - if (userfaultfd_pte_wp(dst_vma, *src_pte)) + if (userfaultfd_pte_wp(dst_vma, ptep_get(src_pte))) /* Uffd-wp needs to be delivered to dest pte as well */ pte = pte_mkuffd_wp(pte); set_pte_at(dst_vma->vm_mm, addr, dst_pte, pte); @@ -922,7 +925,7 @@ copy_present_pte(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, { struct mm_struct *src_mm = src_vma->vm_mm; unsigned long vm_flags = src_vma->vm_flags; - pte_t pte = *src_pte; + pte_t pte = ptep_get(src_pte); struct page *page; struct folio *folio; @@ -1002,6 +1005,7 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma, struct mm_struct *src_mm = src_vma->vm_mm; pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; + pte_t ptent; spinlock_t *src_ptl, *dst_ptl; int progress, ret = 0; int rss[NR_MM_COUNTERS]; @@ -1047,17 +1051,18 @@ again: spin_needbreak(src_ptl) || spin_needbreak(dst_ptl)) break; } - if (pte_none(*src_pte)) { + ptent = ptep_get(src_pte); + if (pte_none(ptent)) { progress++; continue; } - if (unlikely(!pte_present(*src_pte))) { + if (unlikely(!pte_present(ptent))) { ret = copy_nonpresent_pte(dst_mm, src_mm, dst_pte, src_pte, dst_vma, src_vma, addr, rss); if (ret == -EIO) { - entry = pte_to_swp_entry(*src_pte); + entry = pte_to_swp_entry(ptep_get(src_pte)); break; } else if (ret == -EBUSY) { break; @@ -1407,7 +1412,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, flush_tlb_batched_pending(mm); arch_enter_lazy_mmu_mode(); do { - pte_t ptent = *pte; + pte_t ptent = ptep_get(pte); struct page *page; if (pte_none(ptent)) @@ -1822,7 +1827,7 @@ static int validate_page_before_insert(struct page *page) static int insert_page_into_pte_locked(struct vm_area_struct *vma, pte_t *pte, unsigned long addr, struct page *page, pgprot_t prot) { - if (!pte_none(*pte)) + if (!pte_none(ptep_get(pte))) return 
-EBUSY; /* Ok, finally just insert the thing.. */ get_page(page); @@ -2116,7 +2121,8 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, pte = get_locked_pte(mm, addr, &ptl); if (!pte) return VM_FAULT_OOM; - if (!pte_none(*pte)) { + entry = ptep_get(pte); + if (!pte_none(entry)) { if (mkwrite) { /* * For read faults on private mappings the PFN passed @@ -2128,11 +2134,11 @@ static vm_fault_t insert_pfn(struct vm_area_struct *vma, unsigned long addr, * allocation and mapping invalidation so just skip the * update. */ - if (pte_pfn(*pte) != pfn_t_to_pfn(pfn)) { - WARN_ON_ONCE(!is_zero_pfn(pte_pfn(*pte))); + if (pte_pfn(entry) != pfn_t_to_pfn(pfn)) { + WARN_ON_ONCE(!is_zero_pfn(pte_pfn(entry))); goto out_unlock; } - entry = pte_mkyoung(*pte); + entry = pte_mkyoung(entry); entry = maybe_mkwrite(pte_mkdirty(entry), vma); if (ptep_set_access_flags(vma, addr, pte, entry, 1)) update_mmu_cache(vma, addr, pte); @@ -2344,7 +2350,7 @@ static int remap_pte_range(struct mm_struct *mm, pmd_t *pmd, return -ENOMEM; arch_enter_lazy_mmu_mode(); do { - BUG_ON(!pte_none(*pte)); + BUG_ON(!pte_none(ptep_get(pte))); if (!pfn_modify_allowed(pfn, prot)) { err = -EACCES; break; @@ -2585,7 +2591,7 @@ static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, if (fn) { do { - if (create || !pte_none(*pte)) { + if (create || !pte_none(ptep_get(pte))) { err = fn(pte++, addr, data); if (err) break; @@ -2787,7 +2793,7 @@ static inline int pte_unmap_same(struct vm_fault *vmf) #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPTION) if (sizeof(pte_t) > sizeof(unsigned long)) { spin_lock(vmf->ptl); - same = pte_same(*vmf->pte, vmf->orig_pte); + same = pte_same(ptep_get(vmf->pte), vmf->orig_pte); spin_unlock(vmf->ptl); } #endif @@ -2838,7 +2844,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, pte_t entry; vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* * Other thread has already handled the fault * and update local tlb only @@ -2866,7 +2872,7 @@ static inline int __wp_page_copy_user(struct page *dst, struct page *src, /* Re-validate under PTL if the page is still mapped */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ if (vmf->pte) update_mmu_tlb(vma, addr, vmf->pte); @@ -3114,7 +3120,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) * Re-check the pte - we dropped the lock */ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) { + if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { if (old_folio) { if (!folio_test_anon(old_folio)) { dec_mm_counter(mm, mm_counter_file(&old_folio->page)); @@ -3241,7 +3247,7 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf) * We might have raced with another page fault while we released the * pte_offset_map_lock. 
*/ - if (!pte_same(*vmf->pte, vmf->orig_pte)) { + if (!pte_same(ptep_get(vmf->pte), vmf->orig_pte)) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return VM_FAULT_NOPAGE; @@ -3336,7 +3342,7 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) struct folio *folio = NULL; if (likely(!unshare)) { - if (userfaultfd_pte_wp(vma, *vmf->pte)) { + if (userfaultfd_pte_wp(vma, ptep_get(vmf->pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); return handle_userfault(vmf, VM_UFFD_WP); } @@ -3598,7 +3604,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && pte_same(ptep_get(vmf->pte), vmf->orig_pte))) restore_exclusive_pte(vma, vmf->page, vmf->address, vmf->pte); if (vmf->pte) @@ -3643,7 +3649,7 @@ static vm_fault_t pte_marker_clear(struct vm_fault *vmf) * quickly from a PTE_MARKER_UFFD_WP into PTE_MARKER_SWAPIN_ERROR. * So is_pte_marker() check is not enough to safely drop the pte. */ - if (pte_same(vmf->orig_pte, *vmf->pte)) + if (pte_same(vmf->orig_pte, ptep_get(vmf->pte))) pte_clear(vmf->vma->vm_mm, vmf->address, vmf->pte); pte_unmap_unlock(vmf->pte, vmf->ptl); return 0; @@ -3739,7 +3745,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); if (unlikely(!vmf->pte || - !pte_same(*vmf->pte, vmf->orig_pte))) + !pte_same(ptep_get(vmf->pte), + vmf->orig_pte))) goto unlock; /* @@ -3816,7 +3823,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (likely(vmf->pte && pte_same(*vmf->pte, vmf->orig_pte))) + if (likely(vmf->pte && + pte_same(ptep_get(vmf->pte), vmf->orig_pte))) ret = VM_FAULT_OOM; goto unlock; } @@ -3886,7 +3894,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) */ vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, vmf->address, &vmf->ptl); - if (unlikely(!vmf->pte || !pte_same(*vmf->pte, vmf->orig_pte))) + if (unlikely(!vmf->pte || !pte_same(ptep_get(vmf->pte), vmf->orig_pte))) goto out_nomap; if (unlikely(!folio_test_uptodate(folio))) { @@ -4331,9 +4339,9 @@ void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr) static bool vmf_pte_changed(struct vm_fault *vmf) { if (vmf->flags & FAULT_FLAG_ORIG_PTE_VALID) - return !pte_same(*vmf->pte, vmf->orig_pte); + return !pte_same(ptep_get(vmf->pte), vmf->orig_pte); - return !pte_none(*vmf->pte); + return !pte_none(ptep_get(vmf->pte)); } /** @@ -4643,7 +4651,7 @@ static vm_fault_t do_fault(struct vm_fault *vmf) * we don't have concurrent modification by hardware * followed by an update. */ - if (unlikely(pte_none(*vmf->pte))) + if (unlikely(pte_none(ptep_get(vmf->pte)))) ret = VM_FAULT_SIGBUS; else ret = VM_FAULT_NOPAGE; @@ -4699,7 +4707,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) * the pfn may be screwed if the read is non atomic. 
*/ spin_lock(vmf->ptl); - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } @@ -4772,7 +4780,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf) vmf->address, &vmf->ptl); if (unlikely(!vmf->pte)) goto out; - if (unlikely(!pte_same(*vmf->pte, vmf->orig_pte))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), vmf->orig_pte))) { pte_unmap_unlock(vmf->pte, vmf->ptl); goto out; } @@ -4930,7 +4938,7 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf) spin_lock(vmf->ptl); entry = vmf->orig_pte; - if (unlikely(!pte_same(*vmf->pte, entry))) { + if (unlikely(!pte_same(ptep_get(vmf->pte), entry))) { update_mmu_tlb(vmf->vma, vmf->address, vmf->pte); goto unlock; } @@ -5416,7 +5424,7 @@ int follow_pte(struct mm_struct *mm, unsigned long address, ptep = pte_offset_map_lock(mm, pmd, address, ptlp); if (!ptep) goto out; - if (!pte_present(*ptep)) + if (!pte_present(ptep_get(ptep))) goto unlock; *ptepp = ptep; return 0; @@ -5453,7 +5461,7 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address, ret = follow_pte(vma->vm_mm, address, &ptep, &ptl); if (ret) return ret; - *pfn = pte_pfn(*ptep); + *pfn = pte_pfn(ptep_get(ptep)); pte_unmap_unlock(ptep, ptl); return 0; } @@ -5473,7 +5481,7 @@ int follow_phys(struct vm_area_struct *vma, if (follow_pte(vma->vm_mm, address, &ptep, &ptl)) goto out; - pte = *ptep; + pte = ptep_get(ptep); if ((flags & FOLL_WRITE) && !pte_write(pte)) goto unlock; @@ -5517,7 +5525,7 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, retry: if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) return -EINVAL; - pte = *ptep; + pte = ptep_get(ptep); pte_unmap_unlock(ptep, ptl); prot = pgprot_val(pte_pgprot(pte)); @@ -5533,7 +5541,7 @@ retry: if (follow_pte(vma->vm_mm, addr, &ptep, &ptl)) goto out_unmap; - if (!pte_same(pte, *ptep)) { + if (!pte_same(pte, ptep_get(ptep))) { pte_unmap_unlock(ptep, ptl); iounmap(maddr); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 0241bb64978b..edc25195f5bd 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -508,6 +508,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, unsigned long flags = qp->flags; bool has_unmovable = false; pte_t *pte, *mapped_pte; + pte_t ptent; spinlock_t *ptl; ptl = pmd_trans_huge_lock(pmd, vma); @@ -520,9 +521,10 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr, return 0; } for (; addr != end; pte++, addr += PAGE_SIZE) { - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) continue; - folio = vm_normal_folio(vma, addr, *pte); + folio = vm_normal_folio(vma, addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; /* diff --git a/mm/migrate.c b/mm/migrate.c index 363562992046..ce35afdbc1e3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -188,6 +188,7 @@ static bool remove_migration_pte(struct folio *folio, while (page_vma_mapped_walk(&pvmw)) { rmap_t rmap_flags = RMAP_NONE; + pte_t old_pte; pte_t pte; swp_entry_t entry; struct page *new; @@ -210,17 +211,18 @@ static bool remove_migration_pte(struct folio *folio, folio_get(folio); pte = mk_pte(new, READ_ONCE(vma->vm_page_prot)); - if (pte_swp_soft_dirty(*pvmw.pte)) + old_pte = ptep_get(pvmw.pte); + if (pte_swp_soft_dirty(old_pte)) pte = pte_mksoft_dirty(pte); - entry = pte_to_swp_entry(*pvmw.pte); + entry = pte_to_swp_entry(old_pte); if (!is_migration_entry_young(entry)) pte = pte_mkold(pte); if (folio_test_dirty(folio) && 
is_migration_entry_dirty(entry)) pte = pte_mkdirty(pte); if (is_writable_migration_entry(entry)) pte = pte_mkwrite(pte); - else if (pte_swp_uffd_wp(*pvmw.pte)) + else if (pte_swp_uffd_wp(old_pte)) pte = pte_mkuffd_wp(pte); if (folio_test_anon(folio) && !is_readable_migration_entry(entry)) @@ -234,9 +236,9 @@ static bool remove_migration_pte(struct folio *folio, entry = make_readable_device_private_entry( page_to_pfn(new)); pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*pvmw.pte)) + if (pte_swp_soft_dirty(old_pte)) pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*pvmw.pte)) + if (pte_swp_uffd_wp(old_pte)) pte = pte_swp_mkuffd_wp(pte); } @@ -308,7 +310,7 @@ void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, if (!ptep) return; - pte = *ptep; + pte = ptep_get(ptep); pte_unmap(ptep); if (!is_swap_pte(pte)) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index a14af6b12b04..02d272b909b5 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -111,7 +111,7 @@ again: swp_entry_t entry; pte_t pte; - pte = *ptep; + pte = ptep_get(ptep); if (pte_none(pte)) { if (vma_is_anonymous(vma)) { @@ -194,7 +194,7 @@ again: bool anon_exclusive; pte_t swp_pte; - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(pte)); anon_exclusive = PageAnon(page) && PageAnonExclusive(page); if (anon_exclusive) { pte = ptep_clear_flush(vma, addr, ptep); @@ -573,6 +573,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, pud_t *pudp; pmd_t *pmdp; pte_t *ptep; + pte_t orig_pte; /* Only allow populating anonymous memory */ if (!vma_is_anonymous(vma)) @@ -628,16 +629,18 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl); if (!ptep) goto abort; + orig_pte = ptep_get(ptep); + if (check_stable_address_space(mm)) goto unlock_abort; - if (pte_present(*ptep)) { - unsigned long pfn = pte_pfn(*ptep); + if (pte_present(orig_pte)) { + unsigned long pfn = pte_pfn(orig_pte); if (!is_zero_pfn(pfn)) goto unlock_abort; flush = true; - } else if (!pte_none(*ptep)) + } else if (!pte_none(orig_pte)) goto unlock_abort; /* @@ -654,7 +657,7 @@ static void migrate_vma_insert_page(struct migrate_vma *migrate, get_page(page); if (flush) { - flush_cache_page(vma, addr, pte_pfn(*ptep)); + flush_cache_page(vma, addr, pte_pfn(orig_pte)); ptep_clear_flush_notify(vma, addr, ptep); set_pte_at_notify(mm, addr, ptep, entry); update_mmu_cache(vma, addr, ptep); diff --git a/mm/mincore.c b/mm/mincore.c index f33f6a0b1ded..b7f7a516b26c 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -119,7 +119,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, return 0; } for (; addr != end; ptep++, addr += PAGE_SIZE) { - pte_t pte = *ptep; + pte_t pte = ptep_get(ptep); /* We need to do cache lookup too for pte markers */ if (pte_none_mostly(pte)) diff --git a/mm/mlock.c b/mm/mlock.c index 9f2b1173b1b1..d7db94519884 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -312,6 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, struct vm_area_struct *vma = walk->vma; spinlock_t *ptl; pte_t *start_pte, *pte; + pte_t ptent; struct folio *folio; ptl = pmd_trans_huge_lock(pmd, vma); @@ -334,9 +335,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr, return 0; } for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) { - if (!pte_present(*pte)) + ptent = ptep_get(pte); + if (!pte_present(ptent)) continue; - folio = vm_normal_folio(vma, addr, *pte); + folio = vm_normal_folio(vma, 
addr, ptent); if (!folio || folio_is_zone_device(folio)) continue; if (folio_test_large(folio)) diff --git a/mm/mprotect.c b/mm/mprotect.c index 64e1df0af514..327a6eb90afb 100644 --- a/mm/mprotect.c +++ b/mm/mprotect.c @@ -105,7 +105,7 @@ static long change_pte_range(struct mmu_gather *tlb, flush_tlb_batched_pending(vma->vm_mm); arch_enter_lazy_mmu_mode(); do { - oldpte = *pte; + oldpte = ptep_get(pte); if (pte_present(oldpte)) { pte_t ptent; @@ -544,7 +544,8 @@ long change_protection(struct mmu_gather *tlb, static int prot_none_pte_entry(pte_t *pte, unsigned long addr, unsigned long next, struct mm_walk *walk) { - return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } @@ -552,7 +553,8 @@ static int prot_none_hugetlb_entry(pte_t *pte, unsigned long hmask, unsigned long addr, unsigned long next, struct mm_walk *walk) { - return pfn_modify_allowed(pte_pfn(*pte), *(pgprot_t *)(walk->private)) ? + return pfn_modify_allowed(pte_pfn(ptep_get(pte)), + *(pgprot_t *)(walk->private)) ? 0 : -EACCES; } diff --git a/mm/mremap.c b/mm/mremap.c index bfc3d1902a94..8ec184ac90ff 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -188,7 +188,7 @@ static int move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE, new_pte++, new_addr += PAGE_SIZE) { - if (pte_none(*old_pte)) + if (pte_none(ptep_get(old_pte))) continue; pte = ptep_get_and_clear(mm, old_addr, old_pte); diff --git a/mm/page_table_check.c b/mm/page_table_check.c index 0c511330dbc9..8f89f9c8f0df 100644 --- a/mm/page_table_check.c +++ b/mm/page_table_check.c @@ -190,7 +190,7 @@ void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr, if (&init_mm == mm) return; - __page_table_check_pte_clear(mm, addr, *ptep); + __page_table_check_pte_clear(mm, addr, ptep_get(ptep)); if (pte_user_accessible_page(pte)) { page_table_check_set(mm, addr, pte_pfn(pte), PAGE_SIZE >> PAGE_SHIFT, @@ -243,7 +243,7 @@ void __page_table_check_pte_clear_range(struct mm_struct *mm, if (WARN_ON(!ptep)) return; for (i = 0; i < PTRS_PER_PTE; i++) { - __page_table_check_pte_clear(mm, addr, *ptep); + __page_table_check_pte_clear(mm, addr, ptep_get(ptep)); addr += PAGE_SIZE; ptep++; } diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c index 2af734274073..49e0d28f0379 100644 --- a/mm/page_vma_mapped.c +++ b/mm/page_vma_mapped.c @@ -15,6 +15,8 @@ static inline bool not_found(struct page_vma_mapped_walk *pvmw) static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) { + pte_t ptent; + if (pvmw->flags & PVMW_SYNC) { /* Use the stricter lookup */ pvmw->pte = pte_offset_map_lock(pvmw->vma->vm_mm, pvmw->pmd, @@ -35,10 +37,12 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) if (!pvmw->pte) return false; + ptent = ptep_get(pvmw->pte); + if (pvmw->flags & PVMW_MIGRATION) { - if (!is_swap_pte(*pvmw->pte)) + if (!is_swap_pte(ptent)) return false; - } else if (is_swap_pte(*pvmw->pte)) { + } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* * Handle un-addressable ZONE_DEVICE memory. @@ -56,11 +60,11 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) * For more details on device private memory see HMM * (include/linux/hmm.h or mm/hmm.c). 
*/ - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; - } else if (!pte_present(*pvmw->pte)) { + } else if (!pte_present(ptent)) { return false; } pvmw->ptl = *ptlp; @@ -90,33 +94,34 @@ static bool map_pte(struct page_vma_mapped_walk *pvmw, spinlock_t **ptlp) static bool check_pte(struct page_vma_mapped_walk *pvmw) { unsigned long pfn; + pte_t ptent = ptep_get(pvmw->pte); if (pvmw->flags & PVMW_MIGRATION) { swp_entry_t entry; - if (!is_swap_pte(*pvmw->pte)) + if (!is_swap_pte(ptent)) return false; - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_migration_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); - } else if (is_swap_pte(*pvmw->pte)) { + } else if (is_swap_pte(ptent)) { swp_entry_t entry; /* Handle un-addressable ZONE_DEVICE memory */ - entry = pte_to_swp_entry(*pvmw->pte); + entry = pte_to_swp_entry(ptent); if (!is_device_private_entry(entry) && !is_device_exclusive_entry(entry)) return false; pfn = swp_offset_pfn(entry); } else { - if (!pte_present(*pvmw->pte)) + if (!pte_present(ptent)) return false; - pfn = pte_pfn(*pvmw->pte); + pfn = pte_pfn(ptent); } return (pfn - pvmw->pfn) < pvmw->nr_pages; @@ -294,7 +299,7 @@ next_pte: goto restart; } pvmw->pte++; - } while (pte_none(*pvmw->pte)); + } while (pte_none(ptep_get(pvmw->pte))); if (!pvmw->ptl) { pvmw->ptl = ptl; diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c index c7ab18a5fb77..4d454953046f 100644 --- a/mm/pgtable-generic.c +++ b/mm/pgtable-generic.c @@ -68,7 +68,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, pte_t *ptep, pte_t entry, int dirty) { - int changed = !pte_same(*ptep, entry); + int changed = !pte_same(ptep_get(ptep), entry); if (changed) { set_pte_at(vma->vm_mm, address, ptep, entry); flush_tlb_fix_spurious_fault(vma, address, ptep); diff --git a/mm/rmap.c b/mm/rmap.c index cd918cb9a431..0c0d8857dfce 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -826,7 +826,8 @@ static bool folio_referenced_one(struct folio *folio, } if (pvmw.pte) { - if (lru_gen_enabled() && pte_young(*pvmw.pte)) { + if (lru_gen_enabled() && + pte_young(ptep_get(pvmw.pte))) { lru_gen_look_around(&pvmw); referenced++; } @@ -956,13 +957,13 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw) address = pvmw->address; if (pvmw->pte) { - pte_t entry; pte_t *pte = pvmw->pte; + pte_t entry = ptep_get(pte); - if (!pte_dirty(*pte) && !pte_write(*pte)) + if (!pte_dirty(entry) && !pte_write(entry)) continue; - flush_cache_page(vma, address, pte_pfn(*pte)); + flush_cache_page(vma, address, pte_pfn(entry)); entry = ptep_clear_flush(vma, address, pte); entry = pte_wrprotect(entry); entry = pte_mkclean(entry); @@ -1137,7 +1138,7 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) * @folio: Folio which contains page. * @page: Page to add to rmap. * @vma: VM area to add page to. - * @address: User virtual address of the mapping + * @address: User virtual address of the mapping * @exclusive: the page is exclusively owned by the current process */ static void __page_set_anon_rmap(struct folio *folio, struct page *page, @@ -1458,6 +1459,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long pfn; /* * When racing against e.g. 
zap_pte_range() on another cpu, @@ -1508,8 +1510,8 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, break; } - subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + pfn = pte_pfn(ptep_get(pvmw.pte)); + subpage = folio_page(folio, pfn - folio_pfn(folio)); address = pvmw.address; anon_exclusive = folio_test_anon(folio) && PageAnonExclusive(subpage); @@ -1571,7 +1573,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma, } pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* @@ -1818,6 +1820,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, bool anon_exclusive, ret = true; struct mmu_notifier_range range; enum ttu_flags flags = (enum ttu_flags)(long)arg; + unsigned long pfn; /* * When racing against e.g. zap_pte_range() on another cpu, @@ -1877,6 +1880,8 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); + pfn = pte_pfn(ptep_get(pvmw.pte)); + if (folio_is_zone_device(folio)) { /* * Our PTE is a non-present device exclusive entry and @@ -1891,8 +1896,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio); subpage = &folio->page; } else { - subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + subpage = folio_page(folio, pfn - folio_pfn(folio)); } address = pvmw.address; anon_exclusive = folio_test_anon(folio) && @@ -1952,7 +1956,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma, /* Nuke the hugetlb page table entry */ pteval = huge_ptep_clear_flush(vma, address, pvmw.pte); } else { - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pfn); /* Nuke the page table entry. */ if (should_defer_flush(mm, flags)) { /* @@ -2187,6 +2191,7 @@ static bool page_make_device_exclusive_one(struct folio *folio, struct mmu_notifier_range range; swp_entry_t entry; pte_t swp_pte; + pte_t ptent; mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma->vm_mm, address, min(vma->vm_end, @@ -2198,18 +2203,19 @@ static bool page_make_device_exclusive_one(struct folio *folio, /* Unexpected PMD-mapped THP? */ VM_BUG_ON_FOLIO(!pvmw.pte, folio); - if (!pte_present(*pvmw.pte)) { + ptent = ptep_get(pvmw.pte); + if (!pte_present(ptent)) { ret = false; page_vma_mapped_walk_done(&pvmw); break; } subpage = folio_page(folio, - pte_pfn(*pvmw.pte) - folio_pfn(folio)); + pte_pfn(ptent) - folio_pfn(folio)); address = pvmw.address; /* Nuke the page table entry. */ - flush_cache_page(vma, address, pte_pfn(*pvmw.pte)); + flush_cache_page(vma, address, pte_pfn(ptent)); pteval = ptep_clear_flush(vma, address, pvmw.pte); /* Set the dirty flag on the folio now the pte is gone. 
*/ diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index 10d73a0dfcec..a044a130405b 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c @@ -133,7 +133,7 @@ static void * __meminit altmap_alloc_block_buf(unsigned long size, void __meminit vmemmap_verify(pte_t *pte, int node, unsigned long start, unsigned long end) { - unsigned long pfn = pte_pfn(*pte); + unsigned long pfn = pte_pfn(ptep_get(pte)); int actual_node = early_pfn_to_nid(pfn); if (node_distance(actual_node, node) > LOCAL_DISTANCE) @@ -146,7 +146,7 @@ pte_t * __meminit vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node, struct page *reuse) { pte_t *pte = pte_offset_kernel(pmd, addr); - if (pte_none(*pte)) { + if (pte_none(ptep_get(pte))) { pte_t entry; void *p; @@ -414,7 +414,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, * with just tail struct pages. */ return vmemmap_populate_range(start, end, node, NULL, - pte_page(*pte)); + pte_page(ptep_get(pte))); } size = min(end - start, pgmap_vmemmap_nr(pgmap) * sizeof(struct page)); @@ -438,7 +438,7 @@ static int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn, */ next += PAGE_SIZE; rc = vmemmap_populate_range(next, last, node, NULL, - pte_page(*pte)); + pte_page(ptep_get(pte))); if (rc) return -ENOMEM; } diff --git a/mm/swap_state.c b/mm/swap_state.c index a33c60e0158f..4a5c7b748051 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -275,9 +275,9 @@ void clear_shadow_from_swap_cache(int type, unsigned long begin, } } -/* - * If we are the only user, then try to free up the swap cache. - * +/* + * If we are the only user, then try to free up the swap cache. + * * Its ok to check the swapcache flag without the folio lock * here because we are going to recheck again inside * folio_free_swap() _with_ the lock. @@ -294,7 +294,7 @@ void free_swap_cache(struct page *page) } } -/* +/* * Perform a free_page(), also freeing any swap cache associated with * this page if it is the last user of the page. */ diff --git a/mm/swapfile.c b/mm/swapfile.c index 74dd4d2337b7..a6945c2e0d03 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1745,7 +1745,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, struct page *page = folio_file_page(folio, swp_offset(entry)); struct page *swapcache; spinlock_t *ptl; - pte_t *pte, new_pte; + pte_t *pte, new_pte, old_pte; bool hwposioned = false; int ret = 1; @@ -1757,11 +1757,14 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, hwposioned = true; pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); - if (unlikely(!pte || !pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) { + if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), + swp_entry_to_pte(entry)))) { ret = 0; goto out; } + old_pte = ptep_get(pte); + if (unlikely(hwposioned || !PageUptodate(page))) { swp_entry_t swp_entry; @@ -1793,7 +1796,7 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, * call and have the page locked. 
*/ VM_BUG_ON_PAGE(PageWriteback(page), page); - if (pte_swp_exclusive(*pte)) + if (pte_swp_exclusive(old_pte)) rmap_flags |= RMAP_EXCLUSIVE; page_add_anon_rmap(page, vma, addr, rmap_flags); @@ -1802,9 +1805,9 @@ static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, lru_cache_add_inactive_or_unevictable(page, vma); } new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); - if (pte_swp_soft_dirty(*pte)) + if (pte_swp_soft_dirty(old_pte)) new_pte = pte_mksoft_dirty(new_pte); - if (pte_swp_uffd_wp(*pte)) + if (pte_swp_uffd_wp(old_pte)) new_pte = pte_mkuffd_wp(new_pte); setpte: set_pte_at(vma->vm_mm, addr, pte, new_pte); @@ -1833,6 +1836,7 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, unsigned char swp_count; swp_entry_t entry; int ret; + pte_t ptent; if (!pte++) { pte = pte_offset_map(pmd, addr); @@ -1840,10 +1844,12 @@ static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, break; } - if (!is_swap_pte(*pte)) + ptent = ptep_get_lockless(pte); + + if (!is_swap_pte(ptent)) continue; - entry = pte_to_swp_entry(*pte); + entry = pte_to_swp_entry(ptent); if (swp_type(entry) != type) continue; diff --git a/mm/userfaultfd.c b/mm/userfaultfd.c index 5fd787158c70..a2bf37ee276d 100644 --- a/mm/userfaultfd.c +++ b/mm/userfaultfd.c @@ -97,7 +97,7 @@ int mfill_atomic_install_pte(pmd_t *dst_pmd, * registered, we firstly wr-protect a none pte which has no page cache * page backing it, then access the page. */ - if (!pte_none_mostly(*dst_pte)) + if (!pte_none_mostly(ptep_get(dst_pte))) goto out_unlock; folio = page_folio(page); @@ -230,7 +230,7 @@ static int mfill_atomic_pte_zeropage(pmd_t *dst_pmd, goto out_unlock; } ret = -EEXIST; - if (!pte_none(*dst_pte)) + if (!pte_none(ptep_get(dst_pte))) goto out_unlock; set_pte_at(dst_vma->vm_mm, dst_addr, dst_pte, _dst_pte); /* No need to invalidate - it was non-present before */ diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 7382e0a60ce1..5a3bf408251b 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -103,7 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end, if (!pte) return -ENOMEM; do { - BUG_ON(!pte_none(*pte)); + BUG_ON(!pte_none(ptep_get(pte))); #ifdef CONFIG_HUGETLB_PAGE size = arch_vmap_pte_range_map_size(addr, end, pfn, max_page_shift); @@ -472,7 +472,7 @@ static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr, do { struct page *page = pages[*nr]; - if (WARN_ON(!pte_none(*pte))) + if (WARN_ON(!pte_none(ptep_get(pte)))) return -EBUSY; if (WARN_ON(!page)) return -ENOMEM; @@ -704,7 +704,7 @@ struct page *vmalloc_to_page(const void *vmalloc_addr) return NULL; ptep = pte_offset_kernel(pmd, addr); - pte = *ptep; + pte = ptep_get(ptep); if (pte_present(pte)) page = pte_page(pte); diff --git a/mm/vmscan.c b/mm/vmscan.c index 3f64c8d9f629..e305c11ec8fc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4037,15 +4037,16 @@ restart: for (i = pte_index(start), addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; struct folio *folio; + pte_t ptent = ptep_get(pte + i); total++; walk->mm_stats[MM_LEAF_TOTAL]++; - pfn = get_pte_pfn(pte[i], args->vma, addr); + pfn = get_pte_pfn(ptent, args->vma, addr); if (pfn == -1) continue; - if (!pte_young(pte[i])) { + if (!pte_young(ptent)) { walk->mm_stats[MM_LEAF_OLD]++; continue; } @@ -4060,7 +4061,7 @@ restart: young++; walk->mm_stats[MM_LEAF_YOUNG]++; - if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && + if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && 
!folio_test_swapcache(folio))) folio_mark_dirty(folio); @@ -4703,12 +4704,13 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) for (i = 0, addr = start; addr != end; i++, addr += PAGE_SIZE) { unsigned long pfn; + pte_t ptent = ptep_get(pte + i); - pfn = get_pte_pfn(pte[i], pvmw->vma, addr); + pfn = get_pte_pfn(ptent, pvmw->vma, addr); if (pfn == -1) continue; - if (!pte_young(pte[i])) + if (!pte_young(ptent)) continue; folio = get_pfn_folio(pfn, memcg, pgdat, !walk || walk->can_swap); @@ -4720,7 +4722,7 @@ void lru_gen_look_around(struct page_vma_mapped_walk *pvmw) young++; - if (pte_dirty(pte[i]) && !folio_test_dirty(folio) && + if (pte_dirty(ptent) && !folio_test_dirty(folio) && !(folio_test_anon(folio) && folio_test_swapbacked(folio) && !folio_test_swapcache(folio))) folio_mark_dirty(folio); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 51e4882d0873..fb37adecfc91 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -2578,6 +2578,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, { kvm_pfn_t pfn; pte_t *ptep; + pte_t pte; spinlock_t *ptl; int r; @@ -2601,14 +2602,16 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, return r; } - if (write_fault && !pte_write(*ptep)) { + pte = ptep_get(ptep); + + if (write_fault && !pte_write(pte)) { pfn = KVM_PFN_ERR_RO_FAULT; goto out; } if (writable) - *writable = pte_write(*ptep); - pfn = pte_pfn(*ptep); + *writable = pte_write(pte); + pfn = pte_pfn(pte); /* * Get a reference here because callers of *hva_to_pfn* and @@ -2626,7 +2629,7 @@ static int hva_to_pfn_remapped(struct vm_area_struct *vma, * tail pages of non-compound higher order allocations, which * would then underflow the refcount when the caller does the * required put_page. Don't allow those pages here. - */ + */ if (!kvm_try_get_pfn(pfn)) r = -EFAULT; -- cgit v1.2.3 From f999f38b4e6f6fd444acdc289a156ad781919a9c Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:09 +0200 Subject: mm: zswap: add pool shrinking mechanism Patch series "mm: zswap: move writeback LRU from zpool to zswap", v3. This series aims to improve the zswap reclaim mechanism by reorganizing the LRU management. In the current implementation, the LRU is maintained within each zpool driver, resulting in duplicated code across the three drivers. The proposed change consists in moving the LRU management from the individual implementations up to the zswap layer. The primary objective of this refactoring effort is to simplify the codebase. By unifying the reclaim loop and consolidating LRU handling within zswap, we can eliminate redundant code and improve maintainability. Additionally, this change enables the reclamation of stored pages in their actual LRU order. Presently, the zpool drivers link backing pages in an LRU, causing compressed pages with different LRU positions to be written back simultaneously. The series consists of several patches. The first patch implements the LRU and the reclaim loop in zswap, but it is not used yet because all three driver implementations are marked as zpool_evictable. The following three commits modify each zpool driver to be not zpool_evictable, allowing the use of the reclaim loop in zswap. As the drivers removed their shrink functions, the zpool interface is then trimmed by removing zpool_evictable, zpool_ops, and zpool_shrink. Finally, the code in zswap is further cleaned up by simplifying the writeback function and removing the now unnecessary zswap_header. 
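Before reading the diffs, it may help to see the shape of the reclaim path this series adds to zswap. The sketch below is condensed from the zswap_reclaim_entry() and shrink_worker() hunks introduced further down in this first patch; the zpool_evictable() transition logic is dropped, the header handling is compressed, and the function is renamed with a _sketch suffix to make clear it is an illustration of the locking/refcount pattern rather than the literal patch:

/* Illustrative condensation of the new zswap reclaim path (not the literal hunk). */
static int zswap_reclaim_entry_sketch(struct zswap_pool *pool)
{
	struct zswap_header *zhdr;
	struct zswap_entry *entry;
	struct zswap_tree *tree;
	swp_entry_t swpentry;
	int ret;

	/* 1) Take the oldest entry off the pool's LRU under lru_lock. */
	spin_lock(&pool->lru_lock);
	if (list_empty(&pool->lru)) {
		spin_unlock(&pool->lru_lock);
		return -EINVAL;
	}
	entry = list_last_entry(&pool->lru, struct zswap_entry, lru);
	list_del_init(&entry->lru);

	/*
	 * 2) Copy the swap entry to the stack; once lru_lock is dropped the
	 *    zswap_entry may be freed by a racing invalidation, so it must
	 *    not be dereferenced again until it is re-validated in the tree.
	 */
	zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO);
	swpentry = zhdr->swpentry;
	zpool_unmap_handle(pool->zpool, entry->handle);
	spin_unlock(&pool->lru_lock);

	tree = zswap_trees[swp_type(swpentry)];

	/* 3) Re-validate under tree->lock and pin the entry for writeback. */
	spin_lock(&tree->lock);
	if (entry != zswap_rb_search(&tree->rbroot, swp_offset(swpentry))) {
		spin_unlock(&tree->lock);
		return -EAGAIN;
	}
	zswap_entry_get(entry);
	spin_unlock(&tree->lock);

	/* 4) Write the compressed page back to the swap device. */
	ret = zswap_writeback_entry(pool->zpool, entry->handle);

	/* 5) On failure, requeue the entry; either way drop our reference. */
	spin_lock(&tree->lock);
	if (ret) {
		spin_lock(&pool->lru_lock);
		list_move(&entry->lru, &pool->lru);
		spin_unlock(&pool->lru_lock);
	}
	zswap_entry_put(tree, entry);
	spin_unlock(&tree->lock);

	return ret ? -EAGAIN : 0;
}

The shrink worker then simply loops on this helper (or on zpool_shrink() while a backend is still marked zpool_evictable), counting failures exactly as before.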
This patch (of 7): Each zpool driver (zbud, z3fold and zsmalloc) implements its own shrink function, which is called from zpool_shrink. However, with this commit, a unified shrink function is added to zswap. The ultimate goal is to eliminate the need for zpool_shrink once all zpool implementations have dropped their shrink code. To ensure the functionality of each commit, this change focuses solely on adding the mechanism itself. No modifications are made to the backends, meaning that functionally, there are no immediate changes. The zswap mechanism will only come into effect once the backends have removed their shrink code. The subsequent commits will address the modifications needed in the backends. Link: https://lkml.kernel.org/r/20230612093815.133504-1-cerasuolodomenico@gmail.com Link: https://lkml.kernel.org/r/20230612093815.133504-2-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Nhat Pham Tested-by: Yosry Ahmed Acked-by: Johannes Weiner Reviewed-by: Yosry Ahmed Reviewed-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Minchan Kim Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 97 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 92 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 9fa86265f6dd..0024ec5ed574 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -154,6 +154,12 @@ struct crypto_acomp_ctx { struct mutex *mutex; }; +/* + * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock. + * The only case where lru_lock is not acquired while holding tree.lock is + * when a zswap_entry is taken off the lru for writeback, in that case it + * needs to be verified that it's still valid in the tree. + */ struct zswap_pool { struct zpool *zpool; struct crypto_acomp_ctx __percpu *acomp_ctx; @@ -163,6 +169,8 @@ struct zswap_pool { struct work_struct shrink_work; struct hlist_node node; char tfm_name[CRYPTO_MAX_ALG_NAME]; + struct list_head lru; + spinlock_t lru_lock; }; /* @@ -180,10 +188,12 @@ struct zswap_pool { * be held while changing the refcount. Since the lock must * be held, there is no reason to also make refcount atomic. * length - the length in bytes of the compressed page data. Needed during - * decompression. For a same value filled page length is 0. + * decompression. For a same value filled page length is 0, and both + * pool and lru are invalid and must be ignored. * pool - the zswap_pool the entry's data is in * handle - zpool allocation handle that stores the compressed page data * value - value of the same-value filled pages which have same content + * lru - handle to the pool's lru used to evict pages. 
*/ struct zswap_entry { struct rb_node rbnode; @@ -196,6 +206,7 @@ struct zswap_entry { unsigned long value; }; struct obj_cgroup *objcg; + struct list_head lru; }; struct zswap_header { @@ -368,6 +379,12 @@ static void zswap_free_entry(struct zswap_entry *entry) if (!entry->length) atomic_dec(&zswap_same_filled_pages); else { + /* zpool_evictable will be removed once all 3 backends have migrated */ + if (!zpool_evictable(entry->pool->zpool)) { + spin_lock(&entry->pool->lru_lock); + list_del(&entry->lru); + spin_unlock(&entry->pool->lru_lock); + } zpool_free(entry->pool->zpool, entry->handle); zswap_pool_put(entry->pool); } @@ -588,14 +605,72 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +static int zswap_reclaim_entry(struct zswap_pool *pool) +{ + struct zswap_header *zhdr; + struct zswap_entry *entry; + struct zswap_tree *tree; + pgoff_t swpoffset; + int ret; + + /* Get an entry off the LRU */ + spin_lock(&pool->lru_lock); + if (list_empty(&pool->lru)) { + spin_unlock(&pool->lru_lock); + return -EINVAL; + } + entry = list_last_entry(&pool->lru, struct zswap_entry, lru); + list_del_init(&entry->lru); + zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO); + tree = zswap_trees[swp_type(zhdr->swpentry)]; + zpool_unmap_handle(pool->zpool, entry->handle); + /* + * Once the lru lock is dropped, the entry might get freed. The + * swpoffset is copied to the stack, and entry isn't deref'd again + * until the entry is verified to still be alive in the tree. + */ + swpoffset = swp_offset(zhdr->swpentry); + spin_unlock(&pool->lru_lock); + + /* Check for invalidate() race */ + spin_lock(&tree->lock); + if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) { + ret = -EAGAIN; + goto unlock; + } + /* Hold a reference to prevent a free during writeback */ + zswap_entry_get(entry); + spin_unlock(&tree->lock); + + ret = zswap_writeback_entry(pool->zpool, entry->handle); + + spin_lock(&tree->lock); + if (ret) { + /* Writeback failed, put entry back on LRU */ + spin_lock(&pool->lru_lock); + list_move(&entry->lru, &pool->lru); + spin_unlock(&pool->lru_lock); + } + + /* Drop local reference */ + zswap_entry_put(tree, entry); +unlock: + spin_unlock(&tree->lock); + return ret ? -EAGAIN : 0; +} + static void shrink_worker(struct work_struct *w) { struct zswap_pool *pool = container_of(w, typeof(*pool), shrink_work); int ret, failures = 0; + /* zpool_evictable will be removed once all 3 backends have migrated */ do { - ret = zpool_shrink(pool->zpool, 1, NULL); + if (zpool_evictable(pool->zpool)) + ret = zpool_shrink(pool->zpool, 1, NULL); + else + ret = zswap_reclaim_entry(pool); if (ret) { zswap_reject_reclaim_fail++; if (ret != -EAGAIN) @@ -659,6 +734,8 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) */ kref_init(&pool->kref); INIT_LIST_HEAD(&pool->list); + INIT_LIST_HEAD(&pool->lru); + spin_lock_init(&pool->lru_lock); INIT_WORK(&pool->shrink_work, shrink_worker); zswap_pool_debug("created", pool); @@ -1274,7 +1351,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, } /* store */ - hlen = zpool_evictable(entry->pool->zpool) ? 
sizeof(zhdr) : 0; + hlen = sizeof(zhdr); gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (zpool_malloc_support_movable(entry->pool->zpool)) gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; @@ -1317,6 +1394,12 @@ insert_entry: zswap_entry_put(tree, dupentry); } } while (ret == -EEXIST); + /* zpool_evictable will be removed once all 3 backends have migrated */ + if (entry->length && !zpool_evictable(entry->pool->zpool)) { + spin_lock(&entry->pool->lru_lock); + list_add(&entry->lru, &entry->pool->lru); + spin_unlock(&entry->pool->lru_lock); + } spin_unlock(&tree->lock); /* update stats */ @@ -1398,8 +1481,7 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, /* decompress */ dlen = PAGE_SIZE; src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); - if (zpool_evictable(entry->pool->zpool)) - src += sizeof(struct zswap_header); + src += sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(entry->pool->zpool)) { memcpy(tmp, src, entry->length); @@ -1432,6 +1514,11 @@ freeentry: if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; + } else if (entry->length && !zpool_evictable(entry->pool->zpool)) { + /* zpool_evictable will be removed once all 3 backends have migrated */ + spin_lock(&entry->pool->lru_lock); + list_move(&entry->lru, &entry->pool->lru); + spin_unlock(&entry->pool->lru_lock); } spin_unlock(&tree->lock); -- cgit v1.2.3 From 1be537c6451b7203776063a8655c9096ebc50790 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:10 +0200 Subject: mm: zswap: remove page reclaim logic from zbud Switch zbud to the new generic zswap LRU and remove its custom implementation. Link: https://lkml.kernel.org/r/20230612093815.133504-3-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zbud.c | 163 +------------------------------------------------------------- 1 file changed, 1 insertion(+), 162 deletions(-) (limited to 'mm') diff --git a/mm/zbud.c b/mm/zbud.c index 3acd26193920..9d35fd4091ed 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -83,11 +83,7 @@ struct zbud_pool; * its free region. * @buddied: list tracking the zbud pages that contain two buddies; * these zbud pages are full - * @lru: list tracking the zbud pages in LRU order by most recently - * added buddy. * @pages_nr: number of zbud pages in the pool. - * @zpool: zpool driver - * @zpool_ops: zpool operations structure with an evict callback * * This structure is allocated at pool creation time and maintains metadata * pertaining to a particular zbud pool. @@ -102,26 +98,20 @@ struct zbud_pool { struct list_head buddied; struct list_head unbuddied[NCHUNKS]; }; - struct list_head lru; u64 pages_nr; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; }; /* * struct zbud_header - zbud page metadata occupying the first chunk of each * zbud page. 
* @buddy: links the zbud page into the unbuddied/buddied lists in the pool - * @lru: links the zbud page into the lru list in the pool * @first_chunks: the size of the first buddy in chunks, 0 if free * @last_chunks: the size of the last buddy in chunks, 0 if free */ struct zbud_header { struct list_head buddy; - struct list_head lru; unsigned int first_chunks; unsigned int last_chunks; - bool under_reclaim; }; /***************** @@ -149,8 +139,6 @@ static struct zbud_header *init_zbud_page(struct page *page) zhdr->first_chunks = 0; zhdr->last_chunks = 0; INIT_LIST_HEAD(&zhdr->buddy); - INIT_LIST_HEAD(&zhdr->lru); - zhdr->under_reclaim = false; return zhdr; } @@ -221,7 +209,6 @@ static struct zbud_pool *zbud_create_pool(gfp_t gfp) for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&pool->unbuddied[i]); INIT_LIST_HEAD(&pool->buddied); - INIT_LIST_HEAD(&pool->lru); pool->pages_nr = 0; return pool; } @@ -310,11 +297,6 @@ found: list_add(&zhdr->buddy, &pool->buddied); } - /* Add/move zbud page to beginning of LRU */ - if (!list_empty(&zhdr->lru)) - list_del(&zhdr->lru); - list_add(&zhdr->lru, &pool->lru); - *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); @@ -325,11 +307,6 @@ found: * zbud_free() - frees the allocation associated with the given handle * @pool: pool in which the allocation resided * @handle: handle associated with the allocation returned by zbud_alloc() - * - * In the case that the zbud page in which the allocation resides is under - * reclaim, as indicated by the PG_reclaim flag being set, this function - * only sets the first|last_chunks to 0. The page is actually freed - * once both buddies are evicted (see zbud_reclaim_page() below). */ static void zbud_free(struct zbud_pool *pool, unsigned long handle) { @@ -345,18 +322,11 @@ static void zbud_free(struct zbud_pool *pool, unsigned long handle) else zhdr->first_chunks = 0; - if (zhdr->under_reclaim) { - /* zbud page is under reclaim, reclaim will free */ - spin_unlock(&pool->lock); - return; - } - /* Remove from existing buddy list */ list_del(&zhdr->buddy); if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { /* zbud page is empty, free */ - list_del(&zhdr->lru); free_zbud_page(zhdr); pool->pages_nr--; } else { @@ -368,110 +338,6 @@ static void zbud_free(struct zbud_pool *pool, unsigned long handle) spin_unlock(&pool->lock); } -/** - * zbud_reclaim_page() - evicts allocations from a pool page and frees it - * @pool: pool from which a page will attempt to be evicted - * @retries: number of pages on the LRU list for which eviction will - * be attempted before failing - * - * zbud reclaim is different from normal system reclaim in that the reclaim is - * done from the bottom, up. This is because only the bottom layer, zbud, has - * information on how the allocations are organized within each zbud page. This - * has the potential to create interesting locking situations between zbud and - * the user, however. - * - * To avoid these, this is how zbud_reclaim_page() should be called: - * - * The user detects a page should be reclaimed and calls zbud_reclaim_page(). - * zbud_reclaim_page() will remove a zbud page from the pool LRU list and call - * the user-defined eviction handler with the pool and handle as arguments. - * - * If the handle can not be evicted, the eviction handler should return - * non-zero. zbud_reclaim_page() will add the zbud page back to the - * appropriate list and try the next zbud page on the LRU up to - * a user defined number of retries. 
- * - * If the handle is successfully evicted, the eviction handler should - * return 0 _and_ should have called zbud_free() on the handle. zbud_free() - * contains logic to delay freeing the page if the page is under reclaim, - * as indicated by the setting of the PG_reclaim flag on the underlying page. - * - * If all buddies in the zbud page are successfully evicted, then the - * zbud page can be freed. - * - * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are - * no pages to evict or an eviction handler is not registered, -EAGAIN if - * the retry limit was hit. - */ -static int zbud_reclaim_page(struct zbud_pool *pool, unsigned int retries) -{ - int i, ret, freechunks; - struct zbud_header *zhdr; - unsigned long first_handle = 0, last_handle = 0; - - spin_lock(&pool->lock); - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - for (i = 0; i < retries; i++) { - zhdr = list_last_entry(&pool->lru, struct zbud_header, lru); - list_del(&zhdr->lru); - list_del(&zhdr->buddy); - /* Protect zbud page against free */ - zhdr->under_reclaim = true; - /* - * We need encode the handles before unlocking, since we can - * race with free that will set (first|last)_chunks to 0 - */ - first_handle = 0; - last_handle = 0; - if (zhdr->first_chunks) - first_handle = encode_handle(zhdr, FIRST); - if (zhdr->last_chunks) - last_handle = encode_handle(zhdr, LAST); - spin_unlock(&pool->lock); - - /* Issue the eviction callback(s) */ - if (first_handle) { - ret = pool->zpool_ops->evict(pool->zpool, first_handle); - if (ret) - goto next; - } - if (last_handle) { - ret = pool->zpool_ops->evict(pool->zpool, last_handle); - if (ret) - goto next; - } -next: - spin_lock(&pool->lock); - zhdr->under_reclaim = false; - if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { - /* - * Both buddies are now free, free the zbud page and - * return success. 
- */ - free_zbud_page(zhdr); - pool->pages_nr--; - spin_unlock(&pool->lock); - return 0; - } else if (zhdr->first_chunks == 0 || - zhdr->last_chunks == 0) { - /* add to unbuddied list */ - freechunks = num_free_chunks(zhdr); - list_add(&zhdr->buddy, &pool->unbuddied[freechunks]); - } else { - /* add to buddied list */ - list_add(&zhdr->buddy, &pool->buddied); - } - - /* add to beginning of LRU */ - list_add(&zhdr->lru, &pool->lru); - } - spin_unlock(&pool->lock); - return -EAGAIN; -} - /** * zbud_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides @@ -518,14 +384,7 @@ static void *zbud_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { - struct zbud_pool *pool; - - pool = zbud_create_pool(gfp); - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - return pool; + return zbud_create_pool(gfp); } static void zbud_zpool_destroy(void *pool) @@ -543,25 +402,6 @@ static void zbud_zpool_free(void *pool, unsigned long handle) zbud_free(pool, handle); } -static int zbud_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = zbud_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *zbud_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -585,7 +425,6 @@ static struct zpool_driver zbud_zpool_driver = { .destroy = zbud_zpool_destroy, .malloc = zbud_zpool_malloc, .free = zbud_zpool_free, - .shrink = zbud_zpool_shrink, .map = zbud_zpool_map, .unmap = zbud_zpool_unmap, .total_size = zbud_zpool_total_size, -- cgit v1.2.3 From e774a7bc7f0adba785d814ce9a329b6c5c32706a Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:11 +0200 Subject: mm: zswap: remove page reclaim logic from z3fold Switch z3fold to the new generic zswap LRU and remove its custom implementation. Link: https://lkml.kernel.org/r/20230612093815.133504-4-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/z3fold.c | 245 +----------------------------------------------------------- 1 file changed, 2 insertions(+), 243 deletions(-) (limited to 'mm') diff --git a/mm/z3fold.c b/mm/z3fold.c index 0cef845d397b..238a214de59f 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -125,13 +125,11 @@ struct z3fold_header { /** * struct z3fold_pool - stores metadata for each z3fold pool * @name: pool name - * @lock: protects pool unbuddied/lru lists + * @lock: protects pool unbuddied lists * @stale_lock: protects pool stale page list * @unbuddied: per-cpu array of lists tracking z3fold pages that contain 2- * buddies; the list each z3fold page is added to depends on * the size of its free region. - * @lru: list tracking the z3fold pages in LRU order by most recently - * added buddy. * @stale: list of pages marked for freeing * @pages_nr: number of z3fold pages in the pool. 
* @c_handle: cache for z3fold_buddy_slots allocation @@ -149,12 +147,9 @@ struct z3fold_pool { spinlock_t lock; spinlock_t stale_lock; struct list_head *unbuddied; - struct list_head lru; struct list_head stale; atomic64_t pages_nr; struct kmem_cache *c_handle; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; struct workqueue_struct *compact_wq; struct workqueue_struct *release_wq; struct work_struct work; @@ -329,7 +324,6 @@ static struct z3fold_header *init_z3fold_page(struct page *page, bool headless, struct z3fold_header *zhdr = page_address(page); struct z3fold_buddy_slots *slots; - INIT_LIST_HEAD(&page->lru); clear_bit(PAGE_HEADLESS, &page->private); clear_bit(MIDDLE_CHUNK_MAPPED, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); @@ -451,8 +445,6 @@ static void __release_z3fold_page(struct z3fold_header *zhdr, bool locked) set_bit(PAGE_STALE, &page->private); clear_bit(NEEDS_COMPACTING, &page->private); spin_lock(&pool->lock); - if (!list_empty(&page->lru)) - list_del_init(&page->lru); spin_unlock(&pool->lock); if (locked) @@ -930,7 +922,6 @@ static struct z3fold_pool *z3fold_create_pool(const char *name, gfp_t gfp) for_each_unbuddied_list(i, 0) INIT_LIST_HEAD(&unbuddied[i]); } - INIT_LIST_HEAD(&pool->lru); INIT_LIST_HEAD(&pool->stale); atomic64_set(&pool->pages_nr, 0); pool->name = name; @@ -1073,12 +1064,6 @@ found: headless: spin_lock(&pool->lock); - /* Add/move z3fold page to beginning of LRU */ - if (!list_empty(&page->lru)) - list_del(&page->lru); - - list_add(&page->lru, &pool->lru); - *handle = encode_handle(zhdr, bud); spin_unlock(&pool->lock); if (bud != HEADLESS) @@ -1115,9 +1100,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) * immediately so we don't care about its value any more. */ if (!page_claimed) { - spin_lock(&pool->lock); - list_del(&page->lru); - spin_unlock(&pool->lock); put_z3fold_header(zhdr); free_z3fold_page(page, true); atomic64_dec(&pool->pages_nr); @@ -1172,194 +1154,6 @@ static void z3fold_free(struct z3fold_pool *pool, unsigned long handle) put_z3fold_header(zhdr); } -/** - * z3fold_reclaim_page() - evicts allocations from a pool page and frees it - * @pool: pool from which a page will attempt to be evicted - * @retries: number of pages on the LRU list for which eviction will - * be attempted before failing - * - * z3fold reclaim is different from normal system reclaim in that it is done - * from the bottom, up. This is because only the bottom layer, z3fold, has - * information on how the allocations are organized within each z3fold page. - * This has the potential to create interesting locking situations between - * z3fold and the user, however. - * - * To avoid these, this is how z3fold_reclaim_page() should be called: - * - * The user detects a page should be reclaimed and calls z3fold_reclaim_page(). - * z3fold_reclaim_page() will remove a z3fold page from the pool LRU list and - * call the user-defined eviction handler with the pool and handle as - * arguments. - * - * If the handle can not be evicted, the eviction handler should return - * non-zero. z3fold_reclaim_page() will add the z3fold page back to the - * appropriate list and try the next z3fold page on the LRU up to - * a user defined number of retries. - * - * If the handle is successfully evicted, the eviction handler should - * return 0 _and_ should have called z3fold_free() on the handle. 
z3fold_free() - * contains logic to delay freeing the page if the page is under reclaim, - * as indicated by the setting of the PG_reclaim flag on the underlying page. - * - * If all buddies in the z3fold page are successfully evicted, then the - * z3fold page can be freed. - * - * Returns: 0 if page is successfully freed, otherwise -EINVAL if there are - * no pages to evict or an eviction handler is not registered, -EAGAIN if - * the retry limit was hit. - */ -static int z3fold_reclaim_page(struct z3fold_pool *pool, unsigned int retries) -{ - int i, ret = -1; - struct z3fold_header *zhdr = NULL; - struct page *page = NULL; - struct list_head *pos; - unsigned long first_handle = 0, middle_handle = 0, last_handle = 0; - struct z3fold_buddy_slots slots __attribute__((aligned(SLOTS_ALIGN))); - - rwlock_init(&slots.lock); - slots.pool = (unsigned long)pool | (1 << HANDLES_NOFREE); - - spin_lock(&pool->lock); - for (i = 0; i < retries; i++) { - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - list_for_each_prev(pos, &pool->lru) { - page = list_entry(pos, struct page, lru); - - zhdr = page_address(page); - if (test_bit(PAGE_HEADLESS, &page->private)) { - /* - * For non-headless pages, we wait to do this - * until we have the page lock to avoid racing - * with __z3fold_alloc(). Headless pages don't - * have a lock (and __z3fold_alloc() will never - * see them), but we still need to test and set - * PAGE_CLAIMED to avoid racing with - * z3fold_free(), so just do it now before - * leaving the loop. - */ - if (test_and_set_bit(PAGE_CLAIMED, &page->private)) - continue; - - break; - } - - if (!z3fold_page_trylock(zhdr)) { - zhdr = NULL; - continue; /* can't evict at this point */ - } - - /* test_and_set_bit is of course atomic, but we still - * need to do it under page lock, otherwise checking - * that bit in __z3fold_alloc wouldn't make sense - */ - if (zhdr->foreign_handles || - test_and_set_bit(PAGE_CLAIMED, &page->private)) { - z3fold_page_unlock(zhdr); - zhdr = NULL; - continue; /* can't evict such page */ - } - list_del_init(&zhdr->buddy); - zhdr->cpu = -1; - /* See comment in __z3fold_alloc. 
*/ - kref_get(&zhdr->refcount); - break; - } - - if (!zhdr) - break; - - list_del_init(&page->lru); - spin_unlock(&pool->lock); - - if (!test_bit(PAGE_HEADLESS, &page->private)) { - /* - * We need encode the handles before unlocking, and - * use our local slots structure because z3fold_free - * can zero out zhdr->slots and we can't do much - * about that - */ - first_handle = 0; - last_handle = 0; - middle_handle = 0; - memset(slots.slot, 0, sizeof(slots.slot)); - if (zhdr->first_chunks) - first_handle = __encode_handle(zhdr, &slots, - FIRST); - if (zhdr->middle_chunks) - middle_handle = __encode_handle(zhdr, &slots, - MIDDLE); - if (zhdr->last_chunks) - last_handle = __encode_handle(zhdr, &slots, - LAST); - /* - * it's safe to unlock here because we hold a - * reference to this page - */ - z3fold_page_unlock(zhdr); - } else { - first_handle = encode_handle(zhdr, HEADLESS); - last_handle = middle_handle = 0; - } - /* Issue the eviction callback(s) */ - if (middle_handle) { - ret = pool->zpool_ops->evict(pool->zpool, middle_handle); - if (ret) - goto next; - } - if (first_handle) { - ret = pool->zpool_ops->evict(pool->zpool, first_handle); - if (ret) - goto next; - } - if (last_handle) { - ret = pool->zpool_ops->evict(pool->zpool, last_handle); - if (ret) - goto next; - } -next: - if (test_bit(PAGE_HEADLESS, &page->private)) { - if (ret == 0) { - free_z3fold_page(page, true); - atomic64_dec(&pool->pages_nr); - return 0; - } - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); - clear_bit(PAGE_CLAIMED, &page->private); - } else { - struct z3fold_buddy_slots *slots = zhdr->slots; - z3fold_page_lock(zhdr); - if (kref_put(&zhdr->refcount, - release_z3fold_page_locked)) { - kmem_cache_free(pool->c_handle, slots); - return 0; - } - /* - * if we are here, the page is still not completely - * free. 
Take the global pool lock then to be able - * to add it back to the lru list - */ - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); - if (list_empty(&zhdr->buddy)) - add_to_unbuddied(pool, zhdr); - clear_bit(PAGE_CLAIMED, &page->private); - z3fold_page_unlock(zhdr); - } - - /* We started off locked to we need to lock the pool back */ - spin_lock(&pool->lock); - } - spin_unlock(&pool->lock); - return -EAGAIN; -} - /** * z3fold_map() - maps the allocation associated with the given handle * @pool: pool in which the allocation resides @@ -1470,8 +1264,6 @@ static bool z3fold_page_isolate(struct page *page, isolate_mode_t mode) spin_lock(&pool->lock); if (!list_empty(&zhdr->buddy)) list_del_init(&zhdr->buddy); - if (!list_empty(&page->lru)) - list_del_init(&page->lru); spin_unlock(&pool->lock); kref_get(&zhdr->refcount); @@ -1531,9 +1323,6 @@ static int z3fold_page_migrate(struct page *newpage, struct page *page, encode_handle(new_zhdr, MIDDLE); set_bit(NEEDS_COMPACTING, &newpage->private); new_zhdr->cpu = smp_processor_id(); - spin_lock(&pool->lock); - list_add(&newpage->lru, &pool->lru); - spin_unlock(&pool->lock); __SetPageMovable(newpage, &z3fold_mops); z3fold_page_unlock(new_zhdr); @@ -1559,9 +1348,6 @@ static void z3fold_page_putback(struct page *page) INIT_LIST_HEAD(&page->lru); if (kref_put(&zhdr->refcount, release_z3fold_page_locked)) return; - spin_lock(&pool->lock); - list_add(&page->lru, &pool->lru); - spin_unlock(&pool->lock); if (list_empty(&zhdr->buddy)) add_to_unbuddied(pool, zhdr); clear_bit(PAGE_CLAIMED, &page->private); @@ -1582,14 +1368,7 @@ static void *z3fold_zpool_create(const char *name, gfp_t gfp, const struct zpool_ops *zpool_ops, struct zpool *zpool) { - struct z3fold_pool *pool; - - pool = z3fold_create_pool(name, gfp); - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - return pool; + return z3fold_create_pool(name, gfp); } static void z3fold_zpool_destroy(void *pool) @@ -1607,25 +1386,6 @@ static void z3fold_zpool_free(void *pool, unsigned long handle) z3fold_free(pool, handle); } -static int z3fold_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = z3fold_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *z3fold_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -1649,7 +1409,6 @@ static struct zpool_driver z3fold_zpool_driver = { .destroy = z3fold_zpool_destroy, .malloc = z3fold_zpool_malloc, .free = z3fold_zpool_free, - .shrink = z3fold_zpool_shrink, .map = z3fold_zpool_map, .unmap = z3fold_zpool_unmap, .total_size = z3fold_zpool_total_size, -- cgit v1.2.3 From b3067742ae36b36b959835a33937c4dc458f8183 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:12 +0200 Subject: mm: zswap: remove page reclaim logic from zsmalloc Switch zsmalloc to the new generic zswap LRU and remove its custom implementation. 
Link: https://lkml.kernel.org/r/20230612093815.133504-5-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Acked-by: Johannes Weiner Acked-by: Nhat Pham Acked-by: Minchan Kim Tested-by: Yosry Ahmed Acked-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zsmalloc.c | 392 ++-------------------------------------------------------- 1 file changed, 12 insertions(+), 380 deletions(-) (limited to 'mm') diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index c0d433541636..e4d1ad521738 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -107,21 +107,8 @@ */ #define OBJ_ALLOCATED_TAG 1 -#ifdef CONFIG_ZPOOL -/* - * The second least-significant bit in the object's header identifies if the - * value stored at the header is a deferred handle from the last reclaim - * attempt. - * - * As noted above, this is valid because we have room for two bits. - */ -#define OBJ_DEFERRED_HANDLE_TAG 2 -#define OBJ_TAG_BITS 2 -#define OBJ_TAG_MASK (OBJ_ALLOCATED_TAG | OBJ_DEFERRED_HANDLE_TAG) -#else #define OBJ_TAG_BITS 1 #define OBJ_TAG_MASK OBJ_ALLOCATED_TAG -#endif /* CONFIG_ZPOOL */ #define OBJ_INDEX_BITS (BITS_PER_LONG - _PFN_BITS - OBJ_TAG_BITS) #define OBJ_INDEX_MASK ((_AC(1, UL) << OBJ_INDEX_BITS) - 1) @@ -227,12 +214,6 @@ struct link_free { * Handle of allocated object. */ unsigned long handle; -#ifdef CONFIG_ZPOOL - /* - * Deferred handle of a reclaimed object. - */ - unsigned long deferred_handle; -#endif }; }; @@ -250,13 +231,6 @@ struct zs_pool { /* Compact classes */ struct shrinker shrinker; -#ifdef CONFIG_ZPOOL - /* List tracking the zspages in LRU order by most recently added object */ - struct list_head lru; - struct zpool *zpool; - const struct zpool_ops *zpool_ops; -#endif - #ifdef CONFIG_ZSMALLOC_STAT struct dentry *stat_dentry; #endif @@ -279,13 +253,6 @@ struct zspage { unsigned int freeobj; struct page *first_page; struct list_head list; /* fullness list */ - -#ifdef CONFIG_ZPOOL - /* links the zspage to the lru list in the pool */ - struct list_head lru; - bool under_reclaim; -#endif - struct zs_pool *pool; rwlock_t lock; }; @@ -393,14 +360,7 @@ static void *zs_zpool_create(const char *name, gfp_t gfp, * different contexts and its caller must provide a valid * gfp mask. 
*/ - struct zs_pool *pool = zs_create_pool(name); - - if (pool) { - pool->zpool = zpool; - pool->zpool_ops = zpool_ops; - } - - return pool; + return zs_create_pool(name); } static void zs_zpool_destroy(void *pool) @@ -422,27 +382,6 @@ static void zs_zpool_free(void *pool, unsigned long handle) zs_free(pool, handle); } -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries); - -static int zs_zpool_shrink(void *pool, unsigned int pages, - unsigned int *reclaimed) -{ - unsigned int total = 0; - int ret = -EINVAL; - - while (total < pages) { - ret = zs_reclaim_page(pool, 8); - if (ret < 0) - break; - total++; - } - - if (reclaimed) - *reclaimed = total; - - return ret; -} - static void *zs_zpool_map(void *pool, unsigned long handle, enum zpool_mapmode mm) { @@ -481,7 +420,6 @@ static struct zpool_driver zs_zpool_driver = { .malloc_support_movable = true, .malloc = zs_zpool_malloc, .free = zs_zpool_free, - .shrink = zs_zpool_shrink, .map = zs_zpool_map, .unmap = zs_zpool_unmap, .total_size = zs_zpool_total_size, @@ -884,14 +822,6 @@ static inline bool obj_allocated(struct page *page, void *obj, unsigned long *ph return obj_tagged(page, obj, phandle, OBJ_ALLOCATED_TAG); } -#ifdef CONFIG_ZPOOL -static bool obj_stores_deferred_handle(struct page *page, void *obj, - unsigned long *phandle) -{ - return obj_tagged(page, obj, phandle, OBJ_DEFERRED_HANDLE_TAG); -} -#endif - static void reset_page(struct page *page) { __ClearPageMovable(page); @@ -922,39 +852,6 @@ unlock: return 0; } -#ifdef CONFIG_ZPOOL -static unsigned long find_deferred_handle_obj(struct size_class *class, - struct page *page, int *obj_idx); - -/* - * Free all the deferred handles whose objects are freed in zs_free. - */ -static void free_handles(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) -{ - int obj_idx = 0; - struct page *page = get_first_page(zspage); - unsigned long handle; - - while (1) { - handle = find_deferred_handle_obj(class, page, &obj_idx); - if (!handle) { - page = get_next_page(page); - if (!page) - break; - obj_idx = 0; - continue; - } - - cache_free_handle(pool, handle); - obj_idx++; - } -} -#else -static inline void free_handles(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) {} -#endif - static void __free_zspage(struct zs_pool *pool, struct size_class *class, struct zspage *zspage) { @@ -969,9 +866,6 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class, VM_BUG_ON(get_zspage_inuse(zspage)); VM_BUG_ON(fg != ZS_INUSE_RATIO_0); - /* Free all deferred handles from zs_free */ - free_handles(pool, class, zspage); - next = page = get_first_page(zspage); do { VM_BUG_ON_PAGE(!PageLocked(page), page); @@ -1006,9 +900,6 @@ static void free_zspage(struct zs_pool *pool, struct size_class *class, } remove_zspage(class, zspage, ZS_INUSE_RATIO_0); -#ifdef CONFIG_ZPOOL - list_del(&zspage->lru); -#endif __free_zspage(pool, class, zspage); } @@ -1054,11 +945,6 @@ static void init_zspage(struct size_class *class, struct zspage *zspage) off %= PAGE_SIZE; } -#ifdef CONFIG_ZPOOL - INIT_LIST_HEAD(&zspage->lru); - zspage->under_reclaim = false; -#endif - set_freeobj(zspage, 0); } @@ -1525,20 +1411,13 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp) /* We completely set up zspage so mark them as movable */ SetZsPageMovable(pool, zspage); out: -#ifdef CONFIG_ZPOOL - /* Add/move zspage to beginning of LRU */ - if (!list_empty(&zspage->lru)) - list_del(&zspage->lru); - list_add(&zspage->lru, &pool->lru); -#endif - 
spin_unlock(&pool->lock); return handle; } EXPORT_SYMBOL_GPL(zs_malloc); -static void obj_free(int class_size, unsigned long obj, unsigned long *handle) +static void obj_free(int class_size, unsigned long obj) { struct link_free *link; struct zspage *zspage; @@ -1554,25 +1433,12 @@ static void obj_free(int class_size, unsigned long obj, unsigned long *handle) vaddr = kmap_atomic(f_page); link = (struct link_free *)(vaddr + f_offset); - if (handle) { -#ifdef CONFIG_ZPOOL - /* Stores the (deferred) handle in the object's header */ - *handle |= OBJ_DEFERRED_HANDLE_TAG; - *handle &= ~OBJ_ALLOCATED_TAG; - - if (likely(!ZsHugePage(zspage))) - link->deferred_handle = *handle; - else - f_page->index = *handle; -#endif - } else { - /* Insert this object in containing zspage's freelist */ - if (likely(!ZsHugePage(zspage))) - link->next = get_freeobj(zspage) << OBJ_TAG_BITS; - else - f_page->index = 0; - set_freeobj(zspage, f_objidx); - } + /* Insert this object in containing zspage's freelist */ + if (likely(!ZsHugePage(zspage))) + link->next = get_freeobj(zspage) << OBJ_TAG_BITS; + else + f_page->index = 0; + set_freeobj(zspage, f_objidx); kunmap_atomic(vaddr); mod_zspage_inuse(zspage, -1); @@ -1600,21 +1466,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle) class = zspage_class(pool, zspage); class_stat_dec(class, ZS_OBJS_INUSE, 1); - -#ifdef CONFIG_ZPOOL - if (zspage->under_reclaim) { - /* - * Reclaim needs the handles during writeback. It'll free - * them along with the zspage when it's done with them. - * - * Record current deferred handle in the object's header. - */ - obj_free(class->size, obj, &handle); - spin_unlock(&pool->lock); - return; - } -#endif - obj_free(class->size, obj, NULL); + obj_free(class->size, obj); fullness = fix_fullness_group(class, zspage); if (fullness == ZS_INUSE_RATIO_0) @@ -1735,18 +1587,6 @@ static unsigned long find_alloced_obj(struct size_class *class, return find_tagged_obj(class, page, obj_idx, OBJ_ALLOCATED_TAG); } -#ifdef CONFIG_ZPOOL -/* - * Find object storing a deferred handle in header in zspage from index object - * and return handle. - */ -static unsigned long find_deferred_handle_obj(struct size_class *class, - struct page *page, int *obj_idx) -{ - return find_tagged_obj(class, page, obj_idx, OBJ_DEFERRED_HANDLE_TAG); -} -#endif - struct zs_compact_control { /* Source spage for migration which could be a subpage of zspage */ struct page *s_page; @@ -1786,7 +1626,7 @@ static void migrate_zspage(struct zs_pool *pool, struct size_class *class, zs_object_copy(class, free_obj, used_obj); obj_idx++; record_obj(handle, free_obj); - obj_free(class->size, used_obj, NULL); + obj_free(class->size, used_obj); } /* Remember last position in this iteration */ @@ -1846,7 +1686,7 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage) return fullness; } -#if defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) +#ifdef CONFIG_COMPACTION /* * To prevent zspage destroy during migration, zspage freeing should * hold locks of all pages in the zspage. @@ -1888,24 +1728,7 @@ static void lock_zspage(struct zspage *zspage) } migrate_read_unlock(zspage); } -#endif /* defined(CONFIG_ZPOOL) || defined(CONFIG_COMPACTION) */ - -#ifdef CONFIG_ZPOOL -/* - * Unlocks all the pages of the zspage. - * - * pool->lock must be held before this function is called - * to prevent the underlying pages from migrating. 
- */ -static void unlock_zspage(struct zspage *zspage) -{ - struct page *page = get_first_page(zspage); - - do { - unlock_page(page); - } while ((page = get_next_page(page)) != NULL); -} -#endif /* CONFIG_ZPOOL */ +#endif /* CONFIG_COMPACTION */ static void migrate_lock_init(struct zspage *zspage) { @@ -2126,9 +1949,6 @@ static void async_free_zspage(struct work_struct *work) VM_BUG_ON(fullness != ZS_INUSE_RATIO_0); class = pool->size_class[class_idx]; spin_lock(&pool->lock); -#ifdef CONFIG_ZPOOL - list_del(&zspage->lru); -#endif __free_zspage(pool, class, zspage); spin_unlock(&pool->lock); } @@ -2474,10 +2294,6 @@ struct zs_pool *zs_create_pool(const char *name) */ zs_register_shrinker(pool); -#ifdef CONFIG_ZPOOL - INIT_LIST_HEAD(&pool->lru); -#endif - return pool; err: @@ -2520,190 +2336,6 @@ void zs_destroy_pool(struct zs_pool *pool) } EXPORT_SYMBOL_GPL(zs_destroy_pool); -#ifdef CONFIG_ZPOOL -static void restore_freelist(struct zs_pool *pool, struct size_class *class, - struct zspage *zspage) -{ - unsigned int obj_idx = 0; - unsigned long handle, off = 0; /* off is within-page offset */ - struct page *page = get_first_page(zspage); - struct link_free *prev_free = NULL; - void *prev_page_vaddr = NULL; - - /* in case no free object found */ - set_freeobj(zspage, (unsigned int)(-1UL)); - - while (page) { - void *vaddr = kmap_atomic(page); - struct page *next_page; - - while (off < PAGE_SIZE) { - void *obj_addr = vaddr + off; - - /* skip allocated object */ - if (obj_allocated(page, obj_addr, &handle)) { - obj_idx++; - off += class->size; - continue; - } - - /* free deferred handle from reclaim attempt */ - if (obj_stores_deferred_handle(page, obj_addr, &handle)) - cache_free_handle(pool, handle); - - if (prev_free) - prev_free->next = obj_idx << OBJ_TAG_BITS; - else /* first free object found */ - set_freeobj(zspage, obj_idx); - - prev_free = (struct link_free *)vaddr + off / sizeof(*prev_free); - /* if last free object in a previous page, need to unmap */ - if (prev_page_vaddr) { - kunmap_atomic(prev_page_vaddr); - prev_page_vaddr = NULL; - } - - obj_idx++; - off += class->size; - } - - /* - * Handle the last (full or partial) object on this page. - */ - next_page = get_next_page(page); - if (next_page) { - if (!prev_free || prev_page_vaddr) { - /* - * There is no free object in this page, so we can safely - * unmap it. - */ - kunmap_atomic(vaddr); - } else { - /* update prev_page_vaddr since prev_free is on this page */ - prev_page_vaddr = vaddr; - } - } else { /* this is the last page */ - if (prev_free) { - /* - * Reset OBJ_TAG_BITS bit to last link to tell - * whether it's allocated object or not. 
- */ - prev_free->next = -1UL << OBJ_TAG_BITS; - } - - /* unmap previous page (if not done yet) */ - if (prev_page_vaddr) { - kunmap_atomic(prev_page_vaddr); - prev_page_vaddr = NULL; - } - - kunmap_atomic(vaddr); - } - - page = next_page; - off %= PAGE_SIZE; - } -} - -static int zs_reclaim_page(struct zs_pool *pool, unsigned int retries) -{ - int i, obj_idx, ret = 0; - unsigned long handle; - struct zspage *zspage; - struct page *page; - int fullness; - - /* Lock LRU and fullness list */ - spin_lock(&pool->lock); - if (list_empty(&pool->lru)) { - spin_unlock(&pool->lock); - return -EINVAL; - } - - for (i = 0; i < retries; i++) { - struct size_class *class; - - zspage = list_last_entry(&pool->lru, struct zspage, lru); - list_del(&zspage->lru); - - /* zs_free may free objects, but not the zspage and handles */ - zspage->under_reclaim = true; - - class = zspage_class(pool, zspage); - fullness = get_fullness_group(class, zspage); - - /* Lock out object allocations and object compaction */ - remove_zspage(class, zspage, fullness); - - spin_unlock(&pool->lock); - cond_resched(); - - /* Lock backing pages into place */ - lock_zspage(zspage); - - obj_idx = 0; - page = get_first_page(zspage); - while (1) { - handle = find_alloced_obj(class, page, &obj_idx); - if (!handle) { - page = get_next_page(page); - if (!page) - break; - obj_idx = 0; - continue; - } - - /* - * This will write the object and call zs_free. - * - * zs_free will free the object, but the - * under_reclaim flag prevents it from freeing - * the zspage altogether. This is necessary so - * that we can continue working with the - * zspage potentially after the last object - * has been freed. - */ - ret = pool->zpool_ops->evict(pool->zpool, handle); - if (ret) - goto next; - - obj_idx++; - } - -next: - /* For freeing the zspage, or putting it back in the pool and LRU list. */ - spin_lock(&pool->lock); - zspage->under_reclaim = false; - - if (!get_zspage_inuse(zspage)) { - /* - * Fullness went stale as zs_free() won't touch it - * while the page is removed from the pool. Fix it - * up for the check in __free_zspage(). - */ - zspage->fullness = ZS_INUSE_RATIO_0; - - __free_zspage(pool, class, zspage); - spin_unlock(&pool->lock); - return 0; - } - - /* - * Eviction fails on one of the handles, so we need to restore zspage. - * We need to rebuild its freelist (and free stored deferred handles), - * put it back to the correct size class, and add it to the LRU list. - */ - restore_freelist(pool, class, zspage); - putback_zspage(class, zspage); - list_add(&zspage->lru, &pool->lru); - unlock_zspage(zspage); - } - - spin_unlock(&pool->lock); - return -EAGAIN; -} -#endif /* CONFIG_ZPOOL */ - static int __init zs_init(void) { int ret; -- cgit v1.2.3 From 35499e2b79ffc51ea704c3268a5830164825a43e Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:13 +0200 Subject: mm: zswap: remove shrink from zpool interface Now that all three zswap backends have removed their shrink code, it is no longer necessary for the zpool interface to include shrink/writeback endpoints. 
Link: https://lkml.kernel.org/r/20230612093815.133504-6-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Reviewed-by: Yosry Ahmed Acked-by: Nhat Pham Acked-by: Johannes Weiner Reviewed-by: Sergey Senozhatsky Cc: Dan Streetman Cc: Minchan Kim Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- include/linux/zpool.h | 20 ++------------------ mm/z3fold.c | 4 +--- mm/zbud.c | 4 +--- mm/zpool.c | 48 ++---------------------------------------------- mm/zsmalloc.c | 4 +--- mm/zswap.c | 27 +++++++-------------------- 6 files changed, 14 insertions(+), 93 deletions(-) (limited to 'mm') diff --git a/include/linux/zpool.h b/include/linux/zpool.h index e8997010612a..3296438eec06 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h @@ -14,10 +14,6 @@ struct zpool; -struct zpool_ops { - int (*evict)(struct zpool *pool, unsigned long handle); -}; - /* * Control how a handle is mapped. It will be ignored if the * implementation does not support it. Its use is optional. @@ -39,8 +35,7 @@ enum zpool_mapmode { bool zpool_has_pool(char *type); -struct zpool *zpool_create_pool(const char *type, const char *name, - gfp_t gfp, const struct zpool_ops *ops); +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp); const char *zpool_get_type(struct zpool *pool); @@ -53,9 +48,6 @@ int zpool_malloc(struct zpool *pool, size_t size, gfp_t gfp, void zpool_free(struct zpool *pool, unsigned long handle); -int zpool_shrink(struct zpool *pool, unsigned int pages, - unsigned int *reclaimed); - void *zpool_map_handle(struct zpool *pool, unsigned long handle, enum zpool_mapmode mm); @@ -72,7 +64,6 @@ u64 zpool_get_total_size(struct zpool *pool); * @destroy: destroy a pool. * @malloc: allocate mem from a pool. * @free: free mem from a pool. - * @shrink: shrink the pool. * @sleep_mapped: whether zpool driver can sleep during map. * @map: map a handle. * @unmap: unmap a handle. 
@@ -87,10 +78,7 @@ struct zpool_driver { atomic_t refcount; struct list_head list; - void *(*create)(const char *name, - gfp_t gfp, - const struct zpool_ops *ops, - struct zpool *zpool); + void *(*create)(const char *name, gfp_t gfp); void (*destroy)(void *pool); bool malloc_support_movable; @@ -98,9 +86,6 @@ struct zpool_driver { unsigned long *handle); void (*free)(void *pool, unsigned long handle); - int (*shrink)(void *pool, unsigned int pages, - unsigned int *reclaimed); - bool sleep_mapped; void *(*map)(void *pool, unsigned long handle, enum zpool_mapmode mm); @@ -113,7 +98,6 @@ void zpool_register_driver(struct zpool_driver *driver); int zpool_unregister_driver(struct zpool_driver *driver); -bool zpool_evictable(struct zpool *pool); bool zpool_can_sleep_mapped(struct zpool *pool); #endif diff --git a/mm/z3fold.c b/mm/z3fold.c index 238a214de59f..e84de91ecccb 100644 --- a/mm/z3fold.c +++ b/mm/z3fold.c @@ -1364,9 +1364,7 @@ static const struct movable_operations z3fold_mops = { * zpool ****************/ -static void *z3fold_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *z3fold_zpool_create(const char *name, gfp_t gfp) { return z3fold_create_pool(name, gfp); } diff --git a/mm/zbud.c b/mm/zbud.c index 9d35fd4091ed..2190cc1f37b3 100644 --- a/mm/zbud.c +++ b/mm/zbud.c @@ -380,9 +380,7 @@ static u64 zbud_get_pool_size(struct zbud_pool *pool) * zpool ****************/ -static void *zbud_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *zbud_zpool_create(const char *name, gfp_t gfp) { return zbud_create_pool(gfp); } diff --git a/mm/zpool.c b/mm/zpool.c index 6a19c4a58f77..846410479c2f 100644 --- a/mm/zpool.c +++ b/mm/zpool.c @@ -133,7 +133,6 @@ EXPORT_SYMBOL(zpool_has_pool); * @type: The type of the zpool to create (e.g. zbud, zsmalloc) * @name: The name of the zpool (e.g. zram0, zswap) * @gfp: The GFP flags to use when allocating the pool. - * @ops: The optional ops callback. * * This creates a new zpool of the specified type. The gfp flags will be * used when allocating memory, if the implementation supports it. If the @@ -145,8 +144,7 @@ EXPORT_SYMBOL(zpool_has_pool); * * Returns: New zpool on success, NULL on failure. */ -struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, - const struct zpool_ops *ops) +struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp) { struct zpool_driver *driver; struct zpool *zpool; @@ -173,7 +171,7 @@ struct zpool *zpool_create_pool(const char *type, const char *name, gfp_t gfp, } zpool->driver = driver; - zpool->pool = driver->create(name, gfp, ops, zpool); + zpool->pool = driver->create(name, gfp); if (!zpool->pool) { pr_err("couldn't create %s pool\n", type); @@ -279,30 +277,6 @@ void zpool_free(struct zpool *zpool, unsigned long handle) zpool->driver->free(zpool->pool, handle); } -/** - * zpool_shrink() - Shrink the pool size - * @zpool: The zpool to shrink. - * @pages: The number of pages to shrink the pool. - * @reclaimed: The number of pages successfully evicted. - * - * This attempts to shrink the actual memory size of the pool - * by evicting currently used handle(s). If the pool was - * created with no zpool_ops, or the evict call fails for any - * of the handles, this will fail. If non-NULL, the @reclaimed - * parameter will be set to the number of pages reclaimed, - * which may be more than the number of pages requested. 
- * - * Implementations must guarantee this to be thread-safe. - * - * Returns: 0 on success, negative value on error/failure. - */ -int zpool_shrink(struct zpool *zpool, unsigned int pages, - unsigned int *reclaimed) -{ - return zpool->driver->shrink ? - zpool->driver->shrink(zpool->pool, pages, reclaimed) : -EINVAL; -} - /** * zpool_map_handle() - Map a previously allocated handle into memory * @zpool: The zpool that the handle was allocated from @@ -359,24 +333,6 @@ u64 zpool_get_total_size(struct zpool *zpool) return zpool->driver->total_size(zpool->pool); } -/** - * zpool_evictable() - Test if zpool is potentially evictable - * @zpool: The zpool to test - * - * Zpool is only potentially evictable when it's created with struct - * zpool_ops.evict and its driver implements struct zpool_driver.shrink. - * - * However, it doesn't necessarily mean driver will use zpool_ops.evict - * in its implementation of zpool_driver.shrink. It could do internal - * defragmentation instead. - * - * Returns: true if potentially evictable; false otherwise. - */ -bool zpool_evictable(struct zpool *zpool) -{ - return zpool->driver->shrink; -} - /** * zpool_can_sleep_mapped - Test if zpool can sleep when do mapped. * @zpool: The zpool to test diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index e4d1ad521738..3f057970504e 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -351,9 +351,7 @@ static void record_obj(unsigned long handle, unsigned long obj) #ifdef CONFIG_ZPOOL -static void *zs_zpool_create(const char *name, gfp_t gfp, - const struct zpool_ops *zpool_ops, - struct zpool *zpool) +static void *zs_zpool_create(const char *name, gfp_t gfp) { /* * Ignore global gfp flags: zs_malloc() may be invoked from diff --git a/mm/zswap.c b/mm/zswap.c index 0024ec5ed574..a4f8c20e161b 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -258,10 +258,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); -static const struct zpool_ops zswap_zpool_ops = { - .evict = zswap_writeback_entry -}; - static bool zswap_is_full(void) { return totalram_pages() * zswap_max_pool_percent / 100 < @@ -379,12 +375,9 @@ static void zswap_free_entry(struct zswap_entry *entry) if (!entry->length) atomic_dec(&zswap_same_filled_pages); else { - /* zpool_evictable will be removed once all 3 backends have migrated */ - if (!zpool_evictable(entry->pool->zpool)) { - spin_lock(&entry->pool->lru_lock); - list_del(&entry->lru); - spin_unlock(&entry->pool->lru_lock); - } + spin_lock(&entry->pool->lru_lock); + list_del(&entry->lru); + spin_unlock(&entry->pool->lru_lock); zpool_free(entry->pool->zpool, entry->handle); zswap_pool_put(entry->pool); } @@ -665,12 +658,8 @@ static void shrink_worker(struct work_struct *w) shrink_work); int ret, failures = 0; - /* zpool_evictable will be removed once all 3 backends have migrated */ do { - if (zpool_evictable(pool->zpool)) - ret = zpool_shrink(pool->zpool, 1, NULL); - else - ret = zswap_reclaim_entry(pool); + ret = zswap_reclaim_entry(pool); if (ret) { zswap_reject_reclaim_fail++; if (ret != -EAGAIN) @@ -708,7 +697,7 @@ static struct zswap_pool *zswap_pool_create(char *type, char *compressor) /* unique name for each pool specifically required by zsmalloc */ snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count)); - pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops); + pool->zpool = zpool_create_pool(type, name, gfp); if (!pool->zpool) { pr_err("%s zpool not 
available\n", type); goto error; @@ -1394,8 +1383,7 @@ insert_entry: zswap_entry_put(tree, dupentry); } } while (ret == -EEXIST); - /* zpool_evictable will be removed once all 3 backends have migrated */ - if (entry->length && !zpool_evictable(entry->pool->zpool)) { + if (entry->length) { spin_lock(&entry->pool->lru_lock); list_add(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); @@ -1514,8 +1502,7 @@ freeentry: if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; - } else if (entry->length && !zpool_evictable(entry->pool->zpool)) { - /* zpool_evictable will be removed once all 3 backends have migrated */ + } else if (entry->length) { spin_lock(&entry->pool->lru_lock); list_move(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); -- cgit v1.2.3 From ff9d5ba202f98db53da98330b46b68a9228c63e4 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:14 +0200 Subject: mm: zswap: simplify writeback function zswap_writeback_entry() used to be a callback for the backends, which don't know about struct zswap_entry. Now that the only user is the generic zswap LRU reclaimer, it can be simplified: pass the pinned zswap_entry directly, and consolidate the refcount management in the shrink function. Link: https://lkml.kernel.org/r/20230612093815.133504-7-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Tested-by: Yosry Ahmed Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 70 +++++++++++++++++--------------------------------------------- 1 file changed, 19 insertions(+), 51 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index a4f8c20e161b..3a6b07a19262 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -254,7 +254,8 @@ static bool zswap_has_pool; pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpool)) -static int zswap_writeback_entry(struct zpool *pool, unsigned long handle); +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, + struct zswap_tree *tree); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); @@ -635,7 +636,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) zswap_entry_get(entry); spin_unlock(&tree->lock); - ret = zswap_writeback_entry(pool->zpool, entry->handle); + ret = zswap_writeback_entry(entry, zhdr, tree); spin_lock(&tree->lock); if (ret) { @@ -643,8 +644,17 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) spin_lock(&pool->lru_lock); list_move(&entry->lru, &pool->lru); spin_unlock(&pool->lru_lock); + goto put_unlock; } + /* Check for invalidate() race */ + if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) + goto put_unlock; + + /* Drop base reference */ + zswap_entry_put(tree, entry); + +put_unlock: /* Drop local reference */ zswap_entry_put(tree, entry); unlock: @@ -1045,16 +1055,14 @@ static int zswap_get_swap_cache_page(swp_entry_t entry, * the swap cache, the compressed version stored by zswap can be * freed. 
*/ -static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, + struct zswap_tree *tree) { - struct zswap_header *zhdr; - swp_entry_t swpentry; - struct zswap_tree *tree; - pgoff_t offset; - struct zswap_entry *entry; + swp_entry_t swpentry = zhdr->swpentry; struct page *page; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; + struct zpool *pool = entry->pool->zpool; u8 *src, *tmp = NULL; unsigned int dlen; @@ -1069,25 +1077,6 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) return -ENOMEM; } - /* extract swpentry from data */ - zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); - swpentry = zhdr->swpentry; /* here */ - tree = zswap_trees[swp_type(swpentry)]; - offset = swp_offset(swpentry); - zpool_unmap_handle(pool, handle); - - /* find and ref zswap entry */ - spin_lock(&tree->lock); - entry = zswap_entry_find_get(&tree->rbroot, offset); - if (!entry) { - /* entry was invalidated */ - spin_unlock(&tree->lock); - kfree(tmp); - return 0; - } - spin_unlock(&tree->lock); - BUG_ON(offset != entry->offset); - /* try to allocate swap cache page */ switch (zswap_get_swap_cache_page(swpentry, &page)) { case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */ @@ -1121,12 +1110,12 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); dlen = PAGE_SIZE; - zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO); + zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); src = (u8 *)zhdr + sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(pool)) { memcpy(tmp, src, entry->length); src = tmp; - zpool_unmap_handle(pool, handle); + zpool_unmap_handle(pool, entry->handle); } mutex_lock(acomp_ctx->mutex); @@ -1141,7 +1130,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) if (!zpool_can_sleep_mapped(pool)) kfree(tmp); else - zpool_unmap_handle(pool, handle); + zpool_unmap_handle(pool, entry->handle); BUG_ON(ret); BUG_ON(dlen != PAGE_SIZE); @@ -1158,23 +1147,7 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle) put_page(page); zswap_written_back_pages++; - spin_lock(&tree->lock); - /* drop local reference */ - zswap_entry_put(tree, entry); - - /* - * There are two possible situations for entry here: - * (1) refcount is 1(normal case), entry is valid and on the tree - * (2) refcount is 0, entry is freed and not on the tree - * because invalidate happened during writeback - * search the tree and free the entry if find entry - */ - if (entry == zswap_rb_search(&tree->rbroot, offset)) - zswap_entry_put(tree, entry); - spin_unlock(&tree->lock); - return ret; - fail: if (!zpool_can_sleep_mapped(pool)) kfree(tmp); @@ -1183,13 +1156,8 @@ fail: * if we get here due to ZSWAP_SWAPCACHE_EXIST * a load may be happening concurrently. * it is safe and okay to not free the entry. - * if we free the entry in the following put * it is also okay to return !0 */ - spin_lock(&tree->lock); - zswap_entry_put(tree, entry); - spin_unlock(&tree->lock); - return ret; } -- cgit v1.2.3 From 0bb488498c989e0d912d38b624df31922027c8c5 Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Mon, 12 Jun 2023 11:38:15 +0200 Subject: mm: zswap: remove zswap_header Previously, zswap_header served the purpose of storing the swpentry within zpool pages. This allowed zpool implementations to pass relevant information to the writeback function. 
However, with the current implementation, writeback is directly handled within zswap. Consequently, there is no longer a necessity for zswap_header, as the swp_entry_t can be stored directly in zswap_entry. Link: https://lkml.kernel.org/r/20230612093815.133504-8-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Tested-by: Yosry Ahmed Suggested-by: Yosry Ahmed Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Minchan Kim Cc: Nhat Pham Cc: Sergey Senozhatsky Cc: Seth Jennings Cc: Vitaly Wool Signed-off-by: Andrew Morton --- mm/zswap.c | 52 ++++++++++++++++++++++------------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 3a6b07a19262..02d0a6f46f4a 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -197,7 +197,7 @@ struct zswap_pool { */ struct zswap_entry { struct rb_node rbnode; - pgoff_t offset; + swp_entry_t swpentry; int refcount; unsigned int length; struct zswap_pool *pool; @@ -209,10 +209,6 @@ struct zswap_entry { struct list_head lru; }; -struct zswap_header { - swp_entry_t swpentry; -}; - /* * The tree lock in the zswap_tree struct protects a few things: * - the rbtree @@ -254,7 +250,7 @@ static bool zswap_has_pool; pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name, \ zpool_get_type((p)->zpool)) -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree); static int zswap_pool_get(struct zswap_pool *pool); static void zswap_pool_put(struct zswap_pool *pool); @@ -315,12 +311,14 @@ static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset) { struct rb_node *node = root->rb_node; struct zswap_entry *entry; + pgoff_t entry_offset; while (node) { entry = rb_entry(node, struct zswap_entry, rbnode); - if (entry->offset > offset) + entry_offset = swp_offset(entry->swpentry); + if (entry_offset > offset) node = node->rb_left; - else if (entry->offset < offset) + else if (entry_offset < offset) node = node->rb_right; else return entry; @@ -337,13 +335,15 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, { struct rb_node **link = &root->rb_node, *parent = NULL; struct zswap_entry *myentry; + pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry); while (*link) { parent = *link; myentry = rb_entry(parent, struct zswap_entry, rbnode); - if (myentry->offset > entry->offset) + myentry_offset = swp_offset(myentry->swpentry); + if (myentry_offset > entry_offset) link = &(*link)->rb_left; - else if (myentry->offset < entry->offset) + else if (myentry_offset < entry_offset) link = &(*link)->rb_right; else { *dupentry = myentry; @@ -601,7 +601,6 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) static int zswap_reclaim_entry(struct zswap_pool *pool) { - struct zswap_header *zhdr; struct zswap_entry *entry; struct zswap_tree *tree; pgoff_t swpoffset; @@ -615,15 +614,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) } entry = list_last_entry(&pool->lru, struct zswap_entry, lru); list_del_init(&entry->lru); - zhdr = zpool_map_handle(pool->zpool, entry->handle, ZPOOL_MM_RO); - tree = zswap_trees[swp_type(zhdr->swpentry)]; - zpool_unmap_handle(pool->zpool, entry->handle); /* * Once the lru lock is dropped, the entry might get freed. The * swpoffset is copied to the stack, and entry isn't deref'd again * until the entry is verified to still be alive in the tree. 
*/ - swpoffset = swp_offset(zhdr->swpentry); + swpoffset = swp_offset(entry->swpentry); + tree = zswap_trees[swp_type(entry->swpentry)]; spin_unlock(&pool->lru_lock); /* Check for invalidate() race */ @@ -636,7 +633,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) zswap_entry_get(entry); spin_unlock(&tree->lock); - ret = zswap_writeback_entry(entry, zhdr, tree); + ret = zswap_writeback_entry(entry, tree); spin_lock(&tree->lock); if (ret) { @@ -1055,10 +1052,10 @@ static int zswap_get_swap_cache_page(swp_entry_t entry, * the swap cache, the compressed version stored by zswap can be * freed. */ -static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header *zhdr, +static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_tree *tree) { - swp_entry_t swpentry = zhdr->swpentry; + swp_entry_t swpentry = entry->swpentry; struct page *page; struct scatterlist input, output; struct crypto_acomp_ctx *acomp_ctx; @@ -1098,7 +1095,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header * writing. */ spin_lock(&tree->lock); - if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) { + if (zswap_rb_search(&tree->rbroot, swp_offset(entry->swpentry)) != entry) { spin_unlock(&tree->lock); delete_from_swap_cache(page_folio(page)); ret = -ENOMEM; @@ -1110,8 +1107,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry, struct zswap_header acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx); dlen = PAGE_SIZE; - zhdr = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); - src = (u8 *)zhdr + sizeof(struct zswap_header); + src = zpool_map_handle(pool, entry->handle, ZPOOL_MM_RO); if (!zpool_can_sleep_mapped(pool)) { memcpy(tmp, src, entry->length); src = tmp; @@ -1205,11 +1201,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, struct obj_cgroup *objcg = NULL; struct zswap_pool *pool; int ret; - unsigned int hlen, dlen = PAGE_SIZE; + unsigned int dlen = PAGE_SIZE; unsigned long handle, value; char *buf; u8 *src, *dst; - struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) }; gfp_t gfp; /* THP isn't supported */ @@ -1254,7 +1249,7 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, src = kmap_atomic(page); if (zswap_is_page_same_filled(src, &value)) { kunmap_atomic(src); - entry->offset = offset; + entry->swpentry = swp_entry(type, offset); entry->length = 0; entry->value = value; atomic_inc(&zswap_same_filled_pages); @@ -1308,11 +1303,10 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, } /* store */ - hlen = sizeof(zhdr); gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; if (zpool_malloc_support_movable(entry->pool->zpool)) gfp |= __GFP_HIGHMEM | __GFP_MOVABLE; - ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle); + ret = zpool_malloc(entry->pool->zpool, dlen, gfp, &handle); if (ret == -ENOSPC) { zswap_reject_compress_poor++; goto put_dstmem; @@ -1322,13 +1316,12 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset, goto put_dstmem; } buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO); - memcpy(buf, &zhdr, hlen); - memcpy(buf + hlen, dst, dlen); + memcpy(buf, dst, dlen); zpool_unmap_handle(entry->pool->zpool, handle); mutex_unlock(acomp_ctx->mutex); /* populate entry */ - entry->offset = offset; + entry->swpentry = swp_entry(type, offset); entry->handle = handle; entry->length = dlen; @@ -1437,7 +1430,6 @@ static int zswap_frontswap_load(unsigned type, pgoff_t offset, /* decompress */ dlen = PAGE_SIZE; src = 
zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO); - src += sizeof(struct zswap_header); if (!zpool_can_sleep_mapped(entry->pool->zpool)) { memcpy(tmp, src, entry->length); -- cgit v1.2.3 From 1e3be4856f49d55c60b6cd500297b06acfe216a9 Mon Sep 17 00:00:00 2001 From: Tarun Sahu Date: Mon, 12 Jun 2023 15:05:14 +0530 Subject: mm/folio: replace set_compound_order with folio_set_order The patch ("mm/folio: Avoid special handling for order value 0 in folio_set_order") [1] removed the need for special handling of order = 0 in folio_set_order. Now, folio_set_order and set_compound_order becomes similar function. This patch removes the set_compound_order and uses folio_set_order instead. [1] https://lore.kernel.org/all/20230609183032.13E08C433D2@smtp.kernel.org/ Link: https://lkml.kernel.org/r/20230612093514.689846-1-tsahu@linux.ibm.com Signed-off-by: Tarun Sahu Reviewed-by Sidhartha Kumar Reviewed-by: Muchun Song Cc: Aneesh Kumar K.V Cc: Gerald Schaefer Cc: Matthew Wilcox Cc: Mike Kravetz Signed-off-by: Andrew Morton --- include/linux/mm.h | 10 ---------- mm/internal.h | 32 ++++++++++++++++---------------- 2 files changed, 16 insertions(+), 26 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index 8f40bf17d597..ab04756b2240 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1232,16 +1232,6 @@ static inline void folio_set_compound_dtor(struct folio *folio, void destroy_large_folio(struct folio *folio); -static inline void set_compound_order(struct page *page, unsigned int order) -{ - struct folio *folio = (struct folio *)page; - - folio->_folio_order = order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; -#endif -} - /* Returns the number of bytes in this potentially compound page. */ static inline unsigned long page_size(struct page *page) { diff --git a/mm/internal.h b/mm/internal.h index 33b8b8f66af3..b0d8778dd910 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -387,12 +387,27 @@ extern void memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order); extern void __free_pages_core(struct page *page, unsigned int order); +/* + * This will have no effect, other than possibly generating a warning, if the + * caller passes in a non-large folio. + */ +static inline void folio_set_order(struct folio *folio, unsigned int order) +{ + if (WARN_ON_ONCE(!order || !folio_test_large(folio))) + return; + + folio->_folio_order = order; +#ifdef CONFIG_64BIT + folio->_folio_nr_pages = 1U << order; +#endif +} + static inline void prep_compound_head(struct page *page, unsigned int order) { struct folio *folio = (struct folio *)page; folio_set_compound_dtor(folio, COMPOUND_PAGE_DTOR); - set_compound_order(page, order); + folio_set_order(folio, order); atomic_set(&folio->_entire_mapcount, -1); atomic_set(&folio->_nr_pages_mapped, 0); atomic_set(&folio->_pincount, 0); @@ -432,21 +447,6 @@ void memmap_init_range(unsigned long, int, unsigned long, unsigned long, int split_free_page(struct page *free_page, unsigned int order, unsigned long split_pfn_offset); -/* - * This will have no effect, other than possibly generating a warning, if the - * caller passes in a non-large folio. 
- */ -static inline void folio_set_order(struct folio *folio, unsigned int order) -{ - if (WARN_ON_ONCE(!order || !folio_test_large(folio))) - return; - - folio->_folio_order = order; -#ifdef CONFIG_64BIT - folio->_folio_nr_pages = 1U << order; -#endif -} - #if defined CONFIG_COMPACTION || defined CONFIG_CMA /* -- cgit v1.2.3 From 65ac132027a884c411b8f9f96d240ba2dde34dec Mon Sep 17 00:00:00 2001 From: "Liam R. Howlett" Date: Wed, 31 May 2023 21:54:02 -0400 Subject: userfaultfd: fix regression in userfaultfd_unmap_prep() Android reported a performance regression in the userfaultfd unmap path. A closer inspection on the userfaultfd_unmap_prep() change showed that a second tree walk would be necessary in the reworked code. Fix the regression by passing each VMA that will be unmapped through to the userfaultfd_unmap_prep() function as they are added to the unmap list, instead of re-walking the tree for the VMA. Link: https://lkml.kernel.org/r/20230601015402.2819343-1-Liam.Howlett@oracle.com Fixes: 69dbe6daf104 ("userfaultfd: use maple tree iterator to iterate VMAs") Signed-off-by: Liam R. Howlett Reported-by: Suren Baghdasaryan Suggested-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- fs/userfaultfd.c | 35 +++++++++++++++-------------------- include/linux/userfaultfd_k.h | 6 +++--- mm/mmap.c | 31 +++++++++++++++---------------- 3 files changed, 33 insertions(+), 39 deletions(-) (limited to 'mm') diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c index 478e2b169c13..0aa5caac5164 100644 --- a/fs/userfaultfd.c +++ b/fs/userfaultfd.c @@ -852,31 +852,26 @@ static bool has_unmap_ctx(struct userfaultfd_ctx *ctx, struct list_head *unmaps, return false; } -int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, +int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *unmaps) { - VMA_ITERATOR(vmi, mm, start); - struct vm_area_struct *vma; - - for_each_vma_range(vmi, vma, end) { - struct userfaultfd_unmap_ctx *unmap_ctx; - struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; + struct userfaultfd_unmap_ctx *unmap_ctx; + struct userfaultfd_ctx *ctx = vma->vm_userfaultfd_ctx.ctx; - if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || - has_unmap_ctx(ctx, unmaps, start, end)) - continue; + if (!ctx || !(ctx->features & UFFD_FEATURE_EVENT_UNMAP) || + has_unmap_ctx(ctx, unmaps, start, end)) + return 0; - unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); - if (!unmap_ctx) - return -ENOMEM; + unmap_ctx = kzalloc(sizeof(*unmap_ctx), GFP_KERNEL); + if (!unmap_ctx) + return -ENOMEM; - userfaultfd_ctx_get(ctx); - atomic_inc(&ctx->mmap_changing); - unmap_ctx->ctx = ctx; - unmap_ctx->start = start; - unmap_ctx->end = end; - list_add_tail(&unmap_ctx->list, unmaps); - } + userfaultfd_ctx_get(ctx); + atomic_inc(&ctx->mmap_changing); + unmap_ctx->ctx = ctx; + unmap_ctx->start = start; + unmap_ctx->end = end; + list_add_tail(&unmap_ctx->list, unmaps); return 0; } diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index d78b01524349..ac7b0c96d351 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -188,8 +188,8 @@ extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end); -extern int userfaultfd_unmap_prep(struct mm_struct *mm, unsigned long start, - unsigned long end, struct list_head *uf); +extern int userfaultfd_unmap_prep(struct vm_area_struct *vma, + unsigned long start, unsigned long end, struct list_head *uf); extern void 
userfaultfd_unmap_complete(struct mm_struct *mm, struct list_head *uf); extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma); @@ -271,7 +271,7 @@ static inline bool userfaultfd_remove(struct vm_area_struct *vma, return true; } -static inline int userfaultfd_unmap_prep(struct mm_struct *mm, +static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct list_head *uf) { diff --git a/mm/mmap.c b/mm/mmap.c index f084b7940431..4fc496bc5b95 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -2417,6 +2417,21 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, goto munmap_sidetree_failed; count++; + if (unlikely(uf)) { + /* + * If userfaultfd_unmap_prep returns an error the vmas + * will remain split, but userland will get a + * highly unexpected error anyway. This is no + * different than the case where the first of the two + * __split_vma fails, but we don't undo the first + * split, despite we could. This is unlikely enough + * failure that it's not worth optimizing it for. + */ + error = userfaultfd_unmap_prep(next, start, end, uf); + + if (error) + goto userfaultfd_error; + } #ifdef CONFIG_DEBUG_VM_MAPLE_TREE BUG_ON(next->vm_start < start); BUG_ON(next->vm_start > end); @@ -2429,22 +2444,6 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, if (!next) next = vma_next(vmi); - if (unlikely(uf)) { - /* - * If userfaultfd_unmap_prep returns an error the vmas - * will remain split, but userland will get a - * highly unexpected error anyway. This is no - * different than the case where the first of the two - * __split_vma fails, but we don't undo the first - * split, despite we could. This is unlikely enough - * failure that it's not worth optimizing it for. - */ - error = userfaultfd_unmap_prep(mm, start, end, uf); - - if (error) - goto userfaultfd_error; - } - #if defined(CONFIG_DEBUG_VM_MAPLE_TREE) /* Make sure no VMAs are about to be lost. */ { -- cgit v1.2.3 From 833dfc0090b3f8017ddac82d818b2d8e5ceb61db Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 10 Jun 2023 11:46:15 +0800 Subject: mm: compaction: mark kcompactd_run() and kcompactd_stop() __meminit Add __meminit to kcompactd_run() and kcompactd_stop() to ensure they're default to __init when memory hotplug is not enabled. Link: https://lkml.kernel.org/r/20230610034615.997813-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: Baolin Wang Signed-off-by: Andrew Morton --- include/linux/compaction.h | 4 ++-- mm/compaction.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 57b16e69c19a..e94776496049 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -98,8 +98,8 @@ extern void compaction_defer_reset(struct zone *zone, int order, bool compaction_zonelist_suitable(struct alloc_context *ac, int order, int alloc_flags); -extern void kcompactd_run(int nid); -extern void kcompactd_stop(int nid); +extern void __meminit kcompactd_run(int nid); +extern void __meminit kcompactd_stop(int nid); extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int highest_zoneidx); #else diff --git a/mm/compaction.c b/mm/compaction.c index 767b0815c874..6149a2d324be 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -3050,7 +3050,7 @@ static int kcompactd(void *p) * This kcompactd start function will be called by init and node-hot-add. * On node-hot-add, kcompactd will moved to proper cpus if cpus are hot-added. 
*/ -void kcompactd_run(int nid) +void __meminit kcompactd_run(int nid) { pg_data_t *pgdat = NODE_DATA(nid); @@ -3068,7 +3068,7 @@ void kcompactd_run(int nid) * Called by memory hotplug when all memory in a node is offlined. Caller must * be holding mem_hotplug_begin/done(). */ -void kcompactd_stop(int nid) +void __meminit kcompactd_stop(int nid) { struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd; -- cgit v1.2.3 From 33ee4f185827d99fd0957ab3cea00b2266fc5abc Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 10 Jun 2023 11:41:14 +0800 Subject: memory tier: remove unneeded !IS_ENABLED(CONFIG_MIGRATION) check establish_demotion_targets() is defined while CONFIG_MIGRATION is enabled. There's no need to check it again. Link: https://lkml.kernel.org/r/20230610034114.981861-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Reviewed-by: David Hildenbrand Reviewed-by: Yang Shi Signed-off-by: Andrew Morton --- mm/memory-tiers.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memory-tiers.c b/mm/memory-tiers.c index dd04f0ce5277..a516e303e304 100644 --- a/mm/memory-tiers.c +++ b/mm/memory-tiers.c @@ -366,7 +366,7 @@ static void establish_demotion_targets(void) lockdep_assert_held_once(&memory_tier_lock); - if (!node_demotion || !IS_ENABLED(CONFIG_MIGRATION)) + if (!node_demotion) return; disable_all_demotion_targets(); -- cgit v1.2.3 From 3a6358c0dbe6a286a4f4504ba392a6039a9fbd12 Mon Sep 17 00:00:00 2001 From: Yu Ma Date: Fri, 9 Jun 2023 23:07:30 -0400 Subject: percpu-internal/pcpu_chunk: re-layout pcpu_chunk structure to reduce false sharing When running UnixBench/Execl throughput case, false sharing is observed due to frequent read on base_addr and write on free_bytes, chunk_md. UnixBench/Execl represents a class of workload where bash scripts are spawned frequently to do some short jobs. It will do system call on execl frequently, and execl will call mm_init to initialize mm_struct of the process. mm_init will call __percpu_counter_init for percpu_counters initialization. Then pcpu_alloc is called to read the base_addr of pcpu_chunk for memory allocation. Inside pcpu_alloc, it will call pcpu_alloc_area to allocate memory from a specified chunk. This function will update "free_bytes" and "chunk_md" to record the rest free bytes and other meta data for this chunk. Correspondingly, pcpu_free_area will also update these 2 members when free memory. Call trace from perf is as below: + 57.15% 0.01% execl [kernel.kallsyms] [k] __percpu_counter_init + 57.13% 0.91% execl [kernel.kallsyms] [k] pcpu_alloc - 55.27% 54.51% execl [kernel.kallsyms] [k] osq_lock - 53.54% 0x654278696e552f34 main __execve entry_SYSCALL_64_after_hwframe do_syscall_64 __x64_sys_execve do_execveat_common.isra.47 alloc_bprm mm_init __percpu_counter_init pcpu_alloc - __mutex_lock.isra.17 In current pcpu_chunk layout, `base_addr' is in the same cache line with `free_bytes' and `chunk_md', and `base_addr' is at the last 8 bytes. This patch moves `bound_map' up to `base_addr', to let `base_addr' locate in a new cacheline. With this change, on Intel Sapphire Rapids 112C/224T platform, based on v6.4-rc4, the 160 parallel score improves by 24%. The pcpu_chunk struct is a backing data structure per chunk, so the additional memory should not be dramatic. A chunk covers ballpark between 64kb and 512kb memory depending on some config and boot time stuff, so I believe the additional memory used here is nominal at best. Working the #s on my desktop: Percpu: 58624 kB 28 cores -> ~2.1MB of percpu memory. 
At say ~128KB per chunk -> 33 chunks, generously 40 chunks. Adding alignment might bump the chunk size ~64 bytes, so in total ~2KB of overhead? I believe we can do a little better to avoid eating that full padding, so likely less than that. [dennis@kernel.org: changelog details] Link: https://lkml.kernel.org/r/20230610030730.110074-1-yu.ma@intel.com Signed-off-by: Yu Ma Reviewed-by: Tim Chen Acked-by: Dennis Zhou Cc: Dan Williams Cc: Dave Hansen Cc: Liam R. Howlett Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/percpu-internal.h | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h index f9847c131998..cdd0aa597a81 100644 --- a/mm/percpu-internal.h +++ b/mm/percpu-internal.h @@ -41,10 +41,17 @@ struct pcpu_chunk { struct list_head list; /* linked to pcpu_slot lists */ int free_bytes; /* free bytes in the chunk */ struct pcpu_block_md chunk_md; - void *base_addr; /* base address of this chunk */ + unsigned long *bound_map; /* boundary map */ + + /* + * base_addr is the base address of this chunk. + * To reduce false sharing, current layout is optimized to make sure + * base_addr locate in the different cacheline with free_bytes and + * chunk_md. + */ + void *base_addr ____cacheline_aligned_in_smp; unsigned long *alloc_map; /* allocation map */ - unsigned long *bound_map; /* boundary map */ struct pcpu_block_md *md_blocks; /* metadata blocks */ void *data; /* chunk data */ -- cgit v1.2.3 From e1ad3e66676479d6a0af6be953767f865c902111 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Fri, 9 Jun 2023 16:44:45 -0700 Subject: mm/khugepaged: use DEFINE_READ_MOSTLY_HASHTABLE macro These are equivalent, but DEFINE_READ_MOSTLY_HASHTABLE exists to define a hashtable in the .data..read_mostly section. 
Link: https://lkml.kernel.org/r/20230609-khugepage-v1-1-dad4e8382298@google.com Signed-off-by: Nick Desaulniers Reviewed-by: Yang Shi Signed-off-by: Andrew Morton --- mm/khugepaged.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 0b4f00712895..87e962c79a52 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -88,7 +88,7 @@ static unsigned int khugepaged_max_ptes_swap __read_mostly; static unsigned int khugepaged_max_ptes_shared __read_mostly; #define MM_SLOTS_HASH_BITS 10 -static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); +static DEFINE_READ_MOSTLY_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS); static struct kmem_cache *mm_slot_cache __read_mostly; -- cgit v1.2.3 From 399fd496c49b399ff9b6fc87fefadb66e156aa5e Mon Sep 17 00:00:00 2001 From: Kassey Li Date: Tue, 13 Jun 2023 17:25:33 +0800 Subject: mm/page_owner/cma: show pfn in cma/page_owner with hex format cma: display pfn as well as pfn_to_page(pfn) page_owner: display pfn in hex rather than decimal Link: https://lkml.kernel.org/r/20230613092533.15449-1-quic_yingangl@quicinc.com Signed-off-by: Kassey Li Cc: Joonsoo Kim Cc: Minchan Kim Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/cma.c | 4 ++-- mm/page_owner.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/cma.c b/mm/cma.c index 6268d6620254..a4cfe995e11e 100644 --- a/mm/cma.c +++ b/mm/cma.c @@ -483,8 +483,8 @@ struct page *cma_alloc(struct cma *cma, unsigned long count, if (ret != -EBUSY) break; - pr_debug("%s(): memory range at %p is busy, retrying\n", - __func__, pfn_to_page(pfn)); + pr_debug("%s(): memory range at pfn 0x%lx %p is busy, retrying\n", + __func__, pfn, pfn_to_page(pfn)); trace_cma_alloc_busy_retry(cma->name, pfn, pfn_to_page(pfn), count, align); diff --git a/mm/page_owner.c b/mm/page_owner.c index 31169b3e7f06..c93baef0148f 100644 --- a/mm/page_owner.c +++ b/mm/page_owner.c @@ -418,7 +418,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn, pageblock_mt = get_pageblock_migratetype(page); page_mt = gfp_migratetype(page_owner->gfp_mask); ret += scnprintf(kbuf + ret, count - ret, - "PFN %lu type %s Block %lu type %s Flags %pGp\n", + "PFN 0x%lx type %s Block %lu type %s Flags %pGp\n", pfn, migratetype_names[page_mt], pfn >> pageblock_order, -- cgit v1.2.3 From 6c77b607ee26472fb945aa41734281c39d06d68f Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Wed, 14 Jun 2023 22:36:12 +0800 Subject: mm: kill lock|unlock_page_memcg() Since commit c7c3dec1c9db ("mm: rmap: remove lock_page_memcg()"), no more user, kill lock_page_memcg() and unlock_page_memcg(). 
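The removed helpers were thin wrappers around the folio API, so any remaining caller converts mechanically; roughly (sketch of the equivalent replacement):

	/* before */
	lock_page_memcg(page);
	...
	unlock_page_memcg(page);

	/* after */
	folio_memcg_lock(page_folio(page));
	...
	folio_memcg_unlock(page_folio(page));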
Link: https://lkml.kernel.org/r/20230614143612.62575-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Acked-by: Johannes Weiner Reviewed-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- Documentation/admin-guide/cgroup-v1/memory.rst | 2 +- include/linux/memcontrol.h | 12 +----------- mm/filemap.c | 2 +- mm/memcontrol.c | 18 ++++-------------- mm/page-writeback.c | 6 +++--- 5 files changed, 10 insertions(+), 30 deletions(-) (limited to 'mm') diff --git a/Documentation/admin-guide/cgroup-v1/memory.rst b/Documentation/admin-guide/cgroup-v1/memory.rst index 47d1d7d932a8..fabaad3fd9c2 100644 --- a/Documentation/admin-guide/cgroup-v1/memory.rst +++ b/Documentation/admin-guide/cgroup-v1/memory.rst @@ -297,7 +297,7 @@ Lock order is as follows:: Page lock (PG_locked bit of page->flags) mm->page_table_lock or split pte_lock - lock_page_memcg (memcg->move_lock) + folio_memcg_lock (memcg->move_lock) mapping->i_pages lock lruvec->lru_lock. diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 00a88cf947e1..c3d3a0c09315 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -419,7 +419,7 @@ static inline struct obj_cgroup *__folio_objcg(struct folio *folio) * * - the folio lock * - LRU isolation - * - lock_page_memcg() + * - folio_memcg_lock() * - exclusive reference * - mem_cgroup_trylock_pages() * @@ -949,8 +949,6 @@ void mem_cgroup_print_oom_group(struct mem_cgroup *memcg); void folio_memcg_lock(struct folio *folio); void folio_memcg_unlock(struct folio *folio); -void lock_page_memcg(struct page *page); -void unlock_page_memcg(struct page *page); void __mod_memcg_state(struct mem_cgroup *memcg, int idx, int val); @@ -1438,14 +1436,6 @@ mem_cgroup_print_oom_meminfo(struct mem_cgroup *memcg) { } -static inline void lock_page_memcg(struct page *page) -{ -} - -static inline void unlock_page_memcg(struct page *page) -{ -} - static inline void folio_memcg_lock(struct folio *folio) { } diff --git a/mm/filemap.c b/mm/filemap.c index 00933089b8b6..758bbdf300e7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -117,7 +117,7 @@ * ->i_pages lock (page_remove_rmap->set_page_dirty) * bdi.wb->list_lock (page_remove_rmap->set_page_dirty) * ->inode->i_lock (page_remove_rmap->set_page_dirty) - * ->memcg->move_lock (page_remove_rmap->lock_page_memcg) + * ->memcg->move_lock (page_remove_rmap->folio_memcg_lock) * bdi.wb->list_lock (zap_pte_range->set_page_dirty) * ->inode->i_lock (zap_pte_range->set_page_dirty) * ->private_lock (zap_pte_range->block_dirty_folio) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 93056918e956..cf06b1c9b3bb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2148,17 +2148,12 @@ again: * When charge migration first begins, we can have multiple * critical sections holding the fast-path RCU lock and one * holding the slowpath move_lock. Track the task who has the - * move_lock for unlock_page_memcg(). + * move_lock for folio_memcg_unlock(). 
*/ memcg->move_lock_task = current; memcg->move_lock_flags = flags; } -void lock_page_memcg(struct page *page) -{ - folio_memcg_lock(page_folio(page)); -} - static void __folio_memcg_unlock(struct mem_cgroup *memcg) { if (memcg && memcg->move_lock_task == current) { @@ -2186,11 +2181,6 @@ void folio_memcg_unlock(struct folio *folio) __folio_memcg_unlock(folio_memcg(folio)); } -void unlock_page_memcg(struct page *page) -{ - folio_memcg_unlock(page_folio(page)); -} - struct memcg_stock_pcp { local_lock_t stock_lock; struct mem_cgroup *cached; /* this never be root cgroup */ @@ -2866,7 +2856,7 @@ static void commit_charge(struct folio *folio, struct mem_cgroup *memcg) * * - the page lock * - LRU isolation - * - lock_page_memcg() + * - folio_memcg_lock() * - exclusive reference * - mem_cgroup_trylock_pages() */ @@ -5829,7 +5819,7 @@ static int mem_cgroup_move_account(struct page *page, * with (un)charging, migration, LRU putback, or anything else * that would rely on a stable page's memory cgroup. * - * Note that lock_page_memcg is a memcg lock, not a page lock, + * Note that folio_memcg_lock is a memcg lock, not a page lock, * to save space. As soon as we switch page's memory cgroup to a * new memcg that isn't locked, the above state can change * concurrently again. Make sure we're truly done with it. @@ -6320,7 +6310,7 @@ static void mem_cgroup_move_charge(void) { lru_add_drain_all(); /* - * Signal lock_page_memcg() to take the memcg's move_lock + * Signal folio_memcg_lock() to take the memcg's move_lock * while we're moving its pages to another memcg. Then wait * for already started RCU-only updates to finish. */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index db7943999007..1d17fb1ec863 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2597,7 +2597,7 @@ EXPORT_SYMBOL(noop_dirty_folio); /* * Helper function for set_page_dirty family. * - * Caller must hold lock_page_memcg(). + * Caller must hold folio_memcg_lock(). * * NOTE: This relies on being atomic wrt interrupts. */ @@ -2631,7 +2631,7 @@ static void folio_account_dirtied(struct folio *folio, /* * Helper function for deaccounting dirty page without writeback. * - * Caller must hold lock_page_memcg(). + * Caller must hold folio_memcg_lock(). */ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) { @@ -2650,7 +2650,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb) * If warn is true, then emit a warning if the folio is not uptodate and has * not been truncated. * - * The caller must hold lock_page_memcg(). Most callers have the folio + * The caller must hold folio_memcg_lock(). Most callers have the folio * locked. A few have the folio blocked from truncation through other * means (eg zap_vma_pages() has it mapped and is holding the page table * lock). This can also be called from mark_buffer_dirty(), which I -- cgit v1.2.3 From 418fd29d9de53b143e28a07430e08dd414b74c3d Mon Sep 17 00:00:00 2001 From: Domenico Cerasuolo Date: Wed, 14 Jun 2023 16:31:22 +0200 Subject: mm: zswap: invaldiate entry after writeback When an entry started writeback, it used to be invalidated with ref count logic alone, meaning that it would stay on the tree until all references were put. The problem with this behavior is that as soon as the writeback started, the ownership of the data held by the entry is passed to the swapcache and should not be left in zswap too. 
Currently there are no known issues because of this, but this change explicitly invalidates an entry that started writeback to reduce opportunities for future bugs. This patch is a follow up on the series titled "mm: zswap: move writeback LRU from zpool to zswap" + commit f090b7949768("mm: zswap: support exclusive loads"). Link: https://lkml.kernel.org/r/20230614143122.74471-1-cerasuolodomenico@gmail.com Signed-off-by: Domenico Cerasuolo Suggested-by: Johannes Weiner Acked-by: Johannes Weiner Cc: Dan Streetman Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Cc: Yosry Ahmed Signed-off-by: Andrew Morton --- mm/zswap.c | 33 +++++++++++++++++---------------- 1 file changed, 17 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 02d0a6f46f4a..c122f042a49d 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -599,6 +599,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +static void zswap_invalidate_entry(struct zswap_tree *tree, + struct zswap_entry *entry) +{ + /* remove from rbtree */ + zswap_rb_erase(&tree->rbroot, entry); + + /* drop the initial reference from entry creation */ + zswap_entry_put(tree, entry); +} + static int zswap_reclaim_entry(struct zswap_pool *pool) { struct zswap_entry *entry; @@ -644,12 +654,13 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) goto put_unlock; } - /* Check for invalidate() race */ - if (entry != zswap_rb_search(&tree->rbroot, swpoffset)) - goto put_unlock; - - /* Drop base reference */ - zswap_entry_put(tree, entry); + /* + * Writeback started successfully, the page now belongs to the + * swapcache. Drop the entry from zswap - unless invalidate already + * took it out while we had the tree->lock released for IO. + */ + if (entry == zswap_rb_search(&tree->rbroot, swpoffset)) + zswap_invalidate_entry(tree, entry); put_unlock: /* Drop local reference */ @@ -1376,16 +1387,6 @@ shrink: goto reject; } -static void zswap_invalidate_entry(struct zswap_tree *tree, - struct zswap_entry *entry) -{ - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); -} - /* * returns 0 if the page was successfully decompressed * return -1 on entry not found or error -- cgit v1.2.3 From 452c03fdbed0d19f907c877a6a9edd226b1ebad9 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Wed, 14 Jun 2023 11:51:16 +0200 Subject: kasan: add support for kasan.fault=panic_on_write KASAN's boot time kernel parameter 'kasan.fault=' currently supports 'report' and 'panic', which results in either only reporting bugs or also panicking on reports. However, some users may wish to have more control over when KASAN reports result in a kernel panic: in particular, KASAN reported invalid _writes_ are of special interest, because they have greater potential to corrupt random kernel memory or be more easily exploited. To panic on invalid writes only, introduce 'kasan.fault=panic_on_write', which allows users to choose to continue running on invalid reads, but panic only on invalid writes. 
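For illustration, and assuming a kernel built with KASAN, the new mode is selected on the kernel command line like the existing kasan.fault= options, e.g.:

	kasan.fault=panic_on_write

With this setting an invalid read only produces a KASAN report, while an invalid write still panics the machine even if kasan_multi_shot is enabled; invalid frees are treated as writes (freeing updates allocator metadata), and asynchronous faults, which carry no access details, are conservatively treated as writes as well.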
Link: https://lkml.kernel.org/r/20230614095158.1133673-1-elver@google.com Signed-off-by: Marco Elver Reviewed-by: Alexander Potapenko Cc: Aleksandr Nogikh Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Jonathan Corbet Cc: Taras Madan Cc: Vincenzo Frascino Signed-off-by: Andrew Morton --- Documentation/dev-tools/kasan.rst | 7 ++++--- mm/kasan/report.c | 31 ++++++++++++++++++++++++++----- 2 files changed, 30 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst index e66916a483cd..7f37a46af574 100644 --- a/Documentation/dev-tools/kasan.rst +++ b/Documentation/dev-tools/kasan.rst @@ -107,9 +107,10 @@ effectively disables ``panic_on_warn`` for KASAN reports. Alternatively, independent of ``panic_on_warn``, the ``kasan.fault=`` boot parameter can be used to control panic and reporting behaviour: -- ``kasan.fault=report`` or ``=panic`` controls whether to only print a KASAN - report or also panic the kernel (default: ``report``). The panic happens even - if ``kasan_multi_shot`` is enabled. +- ``kasan.fault=report``, ``=panic``, or ``=panic_on_write`` controls whether + to only print a KASAN report, panic the kernel, or panic the kernel on + invalid writes only (default: ``report``). The panic happens even if + ``kasan_multi_shot`` is enabled. Software and Hardware Tag-Based KASAN modes (see the section about various modes below) support altering stack trace collection behavior: diff --git a/mm/kasan/report.c b/mm/kasan/report.c index 84d9f3b37014..ca4b6ff080a6 100644 --- a/mm/kasan/report.c +++ b/mm/kasan/report.c @@ -43,6 +43,7 @@ enum kasan_arg_fault { KASAN_ARG_FAULT_DEFAULT, KASAN_ARG_FAULT_REPORT, KASAN_ARG_FAULT_PANIC, + KASAN_ARG_FAULT_PANIC_ON_WRITE, }; static enum kasan_arg_fault kasan_arg_fault __ro_after_init = KASAN_ARG_FAULT_DEFAULT; @@ -57,6 +58,8 @@ static int __init early_kasan_fault(char *arg) kasan_arg_fault = KASAN_ARG_FAULT_REPORT; else if (!strcmp(arg, "panic")) kasan_arg_fault = KASAN_ARG_FAULT_PANIC; + else if (!strcmp(arg, "panic_on_write")) + kasan_arg_fault = KASAN_ARG_FAULT_PANIC_ON_WRITE; else return -EINVAL; @@ -211,7 +214,7 @@ static void start_report(unsigned long *flags, bool sync) pr_err("==================================================================\n"); } -static void end_report(unsigned long *flags, const void *addr) +static void end_report(unsigned long *flags, const void *addr, bool is_write) { if (addr) trace_error_report_end(ERROR_DETECTOR_KASAN, @@ -220,8 +223,18 @@ static void end_report(unsigned long *flags, const void *addr) spin_unlock_irqrestore(&report_lock, *flags); if (!test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) check_panic_on_warn("KASAN"); - if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC) + switch (kasan_arg_fault) { + case KASAN_ARG_FAULT_DEFAULT: + case KASAN_ARG_FAULT_REPORT: + break; + case KASAN_ARG_FAULT_PANIC: panic("kasan.fault=panic set ...\n"); + break; + case KASAN_ARG_FAULT_PANIC_ON_WRITE: + if (is_write) + panic("kasan.fault=panic_on_write set ...\n"); + break; + } add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE); lockdep_on(); report_suppress_stop(); @@ -536,7 +549,11 @@ void kasan_report_invalid_free(void *ptr, unsigned long ip, enum kasan_report_ty print_report(&info); - end_report(&flags, ptr); + /* + * Invalid free is considered a "write" since the allocator's metadata + * updates involves writes. 
+ */ + end_report(&flags, ptr, true); } /* @@ -570,7 +587,7 @@ bool kasan_report(const void *addr, size_t size, bool is_write, print_report(&info); - end_report(&irq_flags, (void *)addr); + end_report(&irq_flags, (void *)addr, is_write); out: user_access_restore(ua_flags); @@ -596,7 +613,11 @@ void kasan_report_async(void) pr_err("Asynchronous fault: no details available\n"); pr_err("\n"); dump_stack_lvl(KERN_ERR); - end_report(&flags, NULL); + /* + * Conservatively set is_write=true, because no details are available. + * In this mode, kasan.fault=panic_on_write is like kasan.fault=panic. + */ + end_report(&flags, NULL, true); } #endif /* CONFIG_KASAN_HW_TAGS */ -- cgit v1.2.3 From c9223a4aede774b0cea2a9944bc5dac48683e802 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Tue, 13 Jun 2023 19:13:10 -0700 Subject: mm/gup_test.c: convert verify_dma_pinned() to us folios verify_dma_pinned() checks that pages are dma-pinned. We can convert this to use folios. Link: https://lkml.kernel.org/r/20230614021312.34085-4-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Reviewed-by: Matthew Wilcox (Oracle) Reviewed-by: Lorenzo Stoakes Signed-off-by: Andrew Morton --- mm/gup_test.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/gup_test.c b/mm/gup_test.c index 1668ce0e0783..26f45fe45c48 100644 --- a/mm/gup_test.c +++ b/mm/gup_test.c @@ -40,24 +40,25 @@ static void verify_dma_pinned(unsigned int cmd, struct page **pages, unsigned long nr_pages) { unsigned long i; - struct page *page; + struct folio *folio; switch (cmd) { case PIN_FAST_BENCHMARK: case PIN_BASIC_TEST: case PIN_LONGTERM_BENCHMARK: for (i = 0; i < nr_pages; i++) { - page = pages[i]; - if (WARN(!page_maybe_dma_pinned(page), + folio = page_folio(pages[i]); + + if (WARN(!folio_maybe_dma_pinned(folio), "pages[%lu] is NOT dma-pinned\n", i)) { - dump_page(page, "gup_test failure"); + dump_page(&folio->page, "gup_test failure"); break; } else if (cmd == PIN_LONGTERM_BENCHMARK && - WARN(!is_longterm_pinnable_page(page), + WARN(!folio_is_longterm_pinnable(folio), "pages[%lu] is NOT pinnable but pinned\n", i)) { - dump_page(page, "gup_test failure"); + dump_page(&folio->page, "gup_test failure"); break; } } -- cgit v1.2.3 From 503670ee6d0670e114a66b8cf2bcd3f71f53c2f7 Mon Sep 17 00:00:00 2001 From: "Vishal Moola (Oracle)" Date: Tue, 13 Jun 2023 19:13:11 -0700 Subject: mm/gup.c: reorganize try_get_folio() try_get_folio() takes in a page, then chooses to do some folio operations based on the flags (either FOLL_GET or FOLL_PIN). We can rewrite this function to be more purpose oriented. After calling try_get_folio(), if neither FOLL_GET nor FOLL_PIN are set, warn and fail. If FOLL_GET is set we can return the result. If FOLL_GET is not set then FOLL_PIN is set, so we pin the folio. This change assists with folio conversions, and makes the function more readable. 
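For orientation, the reorganized function roughly follows the shape below. This is only a sketch: the PCI P2PDMA check, the FOLL_LONGTERM zone check and the actual pin accounting from the patch are elided.

	struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
	{
		struct folio *folio;

		if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0))
			return NULL;		/* caller requested neither mode */

		folio = try_get_folio(page, refs);
		if (flags & FOLL_GET)
			return folio;		/* plain reference, nothing more to do */

		if (!folio)			/* FOLL_PIN from here on */
			return NULL;

		/* bump the pincount / biased refcount, then hand the folio back */
		return folio;
	}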
Link: https://lkml.kernel.org/r/20230614021312.34085-5-vishal.moola@gmail.com Signed-off-by: Vishal Moola (Oracle) Cc: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/gup.c | 86 ++++++++++++++++++++++++++++++++-------------------------------- 1 file changed, 43 insertions(+), 43 deletions(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index 38986e522d34..ce14d4d28503 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -124,58 +124,58 @@ retry: */ struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags) { + struct folio *folio; + + if (WARN_ON_ONCE((flags & (FOLL_GET | FOLL_PIN)) == 0)) + return NULL; + if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page))) return NULL; - if (flags & FOLL_GET) - return try_get_folio(page, refs); - else if (flags & FOLL_PIN) { - struct folio *folio; + folio = try_get_folio(page, refs); - /* - * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a - * right zone, so fail and let the caller fall back to the slow - * path. - */ - if (unlikely((flags & FOLL_LONGTERM) && - !is_longterm_pinnable_page(page))) - return NULL; + if (flags & FOLL_GET) + return folio; - /* - * CAUTION: Don't use compound_head() on the page before this - * point, the result won't be stable. - */ - folio = try_get_folio(page, refs); - if (!folio) - return NULL; + /* FOLL_PIN is set */ + if (!folio) + return NULL; - /* - * When pinning a large folio, use an exact count to track it. - * - * However, be sure to *also* increment the normal folio - * refcount field at least once, so that the folio really - * is pinned. That's why the refcount from the earlier - * try_get_folio() is left intact. - */ - if (folio_test_large(folio)) - atomic_add(refs, &folio->_pincount); - else - folio_ref_add(folio, - refs * (GUP_PIN_COUNTING_BIAS - 1)); - /* - * Adjust the pincount before re-checking the PTE for changes. - * This is essentially a smp_mb() and is paired with a memory - * barrier in page_try_share_anon_rmap(). - */ - smp_mb__after_atomic(); + /* + * Can't do FOLL_LONGTERM + FOLL_PIN gup fast path if not in a + * right zone, so fail and let the caller fall back to the slow + * path. + */ + if (unlikely((flags & FOLL_LONGTERM) && + !folio_is_longterm_pinnable(folio))) { + if (!put_devmap_managed_page_refs(&folio->page, refs)) + folio_put_refs(folio, refs); + return NULL; + } - node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); + /* + * When pinning a large folio, use an exact count to track it. + * + * However, be sure to *also* increment the normal folio + * refcount field at least once, so that the folio really + * is pinned. That's why the refcount from the earlier + * try_get_folio() is left intact. + */ + if (folio_test_large(folio)) + atomic_add(refs, &folio->_pincount); + else + folio_ref_add(folio, + refs * (GUP_PIN_COUNTING_BIAS - 1)); + /* + * Adjust the pincount before re-checking the PTE for changes. + * This is essentially a smp_mb() and is paired with a memory + * barrier in page_try_share_anon_rmap(). 
+ */ + smp_mb__after_atomic(); - return folio; - } + node_stat_mod_folio(folio, NR_FOLL_PIN_ACQUIRED, refs); - WARN_ON_ONCE(1); - return NULL; + return folio; } static void gup_put_folio(struct folio *folio, int refs, unsigned int flags) -- cgit v1.2.3 From aa13779be6b7d1ce0e6ba96b400b351de6f1a326 Mon Sep 17 00:00:00 2001 From: SeongJae Park Date: Thu, 15 Jun 2023 18:33:22 +0000 Subject: mm/damon/core-test: add a test for damon_set_attrs() Commit 5ff6e2fff88e ("mm/damon/core: fix divide error in damon_nr_accesses_to_accesses_bp()") fixed a bug by adding arguments validation in damon_set_attrs(). Add a unit test for the added validation to ensure the bug cannot occur again. Link: https://lkml.kernel.org/r/20230615183323.87561-1-sj@kernel.org Signed-off-by: SeongJae Park Reviewed-by: Kefeng Wang Signed-off-by: Andrew Morton --- mm/damon/core-test.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'mm') diff --git a/mm/damon/core-test.h b/mm/damon/core-test.h index fae64d32b925..c11210124344 100644 --- a/mm/damon/core-test.h +++ b/mm/damon/core-test.h @@ -318,6 +318,29 @@ static void damon_test_update_monitoring_result(struct kunit *test) KUNIT_EXPECT_EQ(test, r->age, 20); } +static void damon_test_set_attrs(struct kunit *test) +{ + struct damon_ctx ctx; + struct damon_attrs valid_attrs = { + .min_nr_regions = 10, .max_nr_regions = 1000, + .sample_interval = 5000, .aggr_interval = 100000,}; + struct damon_attrs invalid_attrs; + + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &valid_attrs), 0); + + invalid_attrs = valid_attrs; + invalid_attrs.min_nr_regions = 1; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); + + invalid_attrs = valid_attrs; + invalid_attrs.max_nr_regions = 9; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); + + invalid_attrs = valid_attrs; + invalid_attrs.aggr_interval = 4999; + KUNIT_EXPECT_EQ(test, damon_set_attrs(&ctx, &invalid_attrs), -EINVAL); +} + static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_target), KUNIT_CASE(damon_test_regions), @@ -329,6 +352,7 @@ static struct kunit_case damon_test_cases[] = { KUNIT_CASE(damon_test_ops_registration), KUNIT_CASE(damon_test_set_regions), KUNIT_CASE(damon_test_update_monitoring_result), + KUNIT_CASE(damon_test_set_attrs), {}, }; -- cgit v1.2.3 From 025b7799b35d32e46988ba0614ea2f91b85d6375 Mon Sep 17 00:00:00 2001 From: ZhangPeng Date: Fri, 16 Jun 2023 14:30:30 +0800 Subject: mm/memcg: remove return value of mem_cgroup_scan_tasks() No user checks the return value of mem_cgroup_scan_tasks(). Make the return value void. 
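As a usage sketch (print_task is a made-up callback; the real users live in the OOM killer), callers now invoke the function purely for its side effects and stop the walk early by returning non-zero from the callback:

	static int print_task(struct task_struct *task, void *arg)
	{
		pr_info("memcg task: %s\n", task->comm);
		return 0;	/* non-zero would terminate the iteration */
	}

	/* with a valid, non-root memcg: */
	mem_cgroup_scan_tasks(memcg, print_task, NULL);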
Link: https://lkml.kernel.org/r/20230616063030.977586-1-zhangpeng362@huawei.com Signed-off-by: ZhangPeng Cc: Johannes Weiner Cc: Kefeng Wang Cc: Michal Hocko Cc: Muchun Song Cc: Nanyong Sun Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- include/linux/memcontrol.h | 7 +++---- mm/memcontrol.c | 9 ++++----- 2 files changed, 7 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c3d3a0c09315..5818af8eca5a 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -820,8 +820,8 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *, struct mem_cgroup *, struct mem_cgroup_reclaim_cookie *); void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *); -int mem_cgroup_scan_tasks(struct mem_cgroup *, - int (*)(struct task_struct *, void *), void *); +void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + int (*)(struct task_struct *, void *), void *arg); static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) { @@ -1364,10 +1364,9 @@ static inline void mem_cgroup_iter_break(struct mem_cgroup *root, { } -static inline int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, +static inline void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, int (*fn)(struct task_struct *, void *), void *arg) { - return 0; } static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cf06b1c9b3bb..a834b1edcde9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1259,13 +1259,13 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg) * * This function iterates over tasks attached to @memcg or to any of its * descendants and calls @fn for each task. If @fn returns a non-zero - * value, the function breaks the iteration loop and returns the value. - * Otherwise, it will iterate over all tasks and return 0. + * value, the function breaks the iteration loop. Otherwise, it will iterate + * over all tasks and return 0. * * This function must not be called for the root memory cgroup. */ -int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, - int (*fn)(struct task_struct *, void *), void *arg) +void mem_cgroup_scan_tasks(struct mem_cgroup *memcg, + int (*fn)(struct task_struct *, void *), void *arg) { struct mem_cgroup *iter; int ret = 0; @@ -1285,7 +1285,6 @@ int mem_cgroup_scan_tasks(struct mem_cgroup *memcg, break; } } - return ret; } #ifdef CONFIG_DEBUG_VM -- cgit v1.2.3 From cf01724e2d73a90524450e3dd8798cfb9d7aca05 Mon Sep 17 00:00:00 2001 From: Miaohe Lin Date: Sat, 17 Jun 2023 11:46:22 +0800 Subject: mm: page_alloc: make compound_page_dtors static It's only used inside page_alloc.c now. So make it static and remove the declaration in mm.h. 
Link: https://lkml.kernel.org/r/20230617034622.1235913-1-linmiaohe@huawei.com Signed-off-by: Miaohe Lin Signed-off-by: Andrew Morton --- include/linux/mm.h | 1 - mm/page_alloc.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index a8baa34d0747..cf43deb25553 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1220,7 +1220,6 @@ enum compound_dtor_id { #endif NR_COMPOUND_DTORS, }; -extern compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS]; static inline void folio_set_compound_dtor(struct folio *folio, enum compound_dtor_id compound_dtor) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 6a18f2232e3e..5b8a9d610b72 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -284,7 +284,7 @@ const char * const migratetype_names[MIGRATE_TYPES] = { #endif }; -compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { +static compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = { [NULL_COMPOUND_DTOR] = NULL, [COMPOUND_PAGE_DTOR] = free_compound_page, #ifdef CONFIG_HUGETLB_PAGE -- cgit v1.2.3 From 814bc1de03ea4361101408e63a68e4b82aef22cb Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Mon, 19 Jun 2023 13:38:21 -0600 Subject: mm/mglru: make memcg_lru->lock irq safe lru_gen_rotate_memcg() can happen in softirq if memory.soft_limit_in_bytes is set. This requires memcg_lru->lock to be irq safe. Lockdep warns on this. This problem only affects memcg v1. Link: https://lkml.kernel.org/r/20230619193821.2710944-1-yuzhao@google.com Fixes: e4dde56cd208 ("mm: multi-gen LRU: per-node lru_gen_folio lists") Signed-off-by: Yu Zhao Reported-by: syzbot+87c490fd2be656269b6a@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=87c490fd2be656269b6a Reviewed-by: Yosry Ahmed Cc: Signed-off-by: Andrew Morton --- mm/vmscan.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 5bf98d0a22c9..6114a1fc6c68 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -4728,10 +4728,11 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) { int seg; int old, new; + unsigned long flags; int bin = get_random_u32_below(MEMCG_NR_BINS); struct pglist_data *pgdat = lruvec_pgdat(lruvec); - spin_lock(&pgdat->memcg_lru.lock); + spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4766,7 +4767,7 @@ static void lru_gen_rotate_memcg(struct lruvec *lruvec, int op) if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); } void lru_gen_online_memcg(struct mem_cgroup *memcg) @@ -4779,7 +4780,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg) struct pglist_data *pgdat = NODE_DATA(nid); struct lruvec *lruvec = get_lruvec(memcg, nid); - spin_lock(&pgdat->memcg_lru.lock); + spin_lock_irq(&pgdat->memcg_lru.lock); VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4790,7 +4791,7 @@ void lru_gen_online_memcg(struct mem_cgroup *memcg) lruvec->lrugen.gen = gen; - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irq(&pgdat->memcg_lru.lock); } } @@ -4814,7 +4815,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) struct pglist_data *pgdat = NODE_DATA(nid); struct lruvec *lruvec = get_lruvec(memcg, nid); - spin_lock(&pgdat->memcg_lru.lock); + 
spin_lock_irq(&pgdat->memcg_lru.lock); VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); @@ -4826,7 +4827,7 @@ void lru_gen_release_memcg(struct mem_cgroup *memcg) if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); - spin_unlock(&pgdat->memcg_lru.lock); + spin_unlock_irq(&pgdat->memcg_lru.lock); } } -- cgit v1.2.3 From 9721fd82351d47a37ba982272e128101f24efd7c Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 14 Jun 2023 16:40:20 +0800 Subject: mm: compaction: skip memory hole rapidly when isolating migratable pages On some machines, the normal zone can have a large memory hole like below memory layout, and we can see the range from 0x100000000 to 0x1800000000 is a hole. So when isolating some migratable pages, the scanner can meet the hole and it will take more time to skip the large hole. From my measurement, I can see the isolation scanner will take 80us ~ 100us to skip the large hole [0x100000000 - 0x1800000000]. So adding a new helper to fast search next online memory section to skip the large hole can help to find next suitable pageblock efficiently. With this patch, I can see the large hole scanning only takes < 1us. [ 0.000000] Zone ranges: [ 0.000000] DMA [mem 0x0000000040000000-0x00000000ffffffff] [ 0.000000] DMA32 empty [ 0.000000] Normal [mem 0x0000000100000000-0x0000001fa7ffffff] [ 0.000000] Movable zone start for each node [ 0.000000] Early memory node ranges [ 0.000000] node 0: [mem 0x0000000040000000-0x0000000fffffffff] [ 0.000000] node 0: [mem 0x0000001800000000-0x0000001fa3c7ffff] [ 0.000000] node 0: [mem 0x0000001fa3c80000-0x0000001fa3ffffff] [ 0.000000] node 0: [mem 0x0000001fa4000000-0x0000001fa402ffff] [ 0.000000] node 0: [mem 0x0000001fa4030000-0x0000001fa40effff] [ 0.000000] node 0: [mem 0x0000001fa40f0000-0x0000001fa73cffff] [ 0.000000] node 0: [mem 0x0000001fa73d0000-0x0000001fa745ffff] [ 0.000000] node 0: [mem 0x0000001fa7460000-0x0000001fa746ffff] [ 0.000000] node 0: [mem 0x0000001fa7470000-0x0000001fa758ffff] [ 0.000000] node 0: [mem 0x0000001fa7590000-0x0000001fa7ffffff] [baolin.wang@linux.alibaba.com: limit next_ptn to not exceed cc->free_pfn] Link: https://lkml.kernel.org/r/a1d859c28af0c7e85e91795e7473f553eb180a9d.1686813379.git.baolin.wang@linux.alibaba.com Link: https://lkml.kernel.org/r/75b4c8ca36bf44ad8c42bf0685ac19d272e426ec.1686705221.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Acked-by: "Huang, Ying" Cc: Mel Gorman Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/compaction.c | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 6149a2d324be..0fb3b89b3967 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -229,6 +229,33 @@ static void reset_cached_positions(struct zone *zone) pageblock_start_pfn(zone_end_pfn(zone) - 1); } +#ifdef CONFIG_SPARSEMEM +/* + * If the PFN falls into an offline section, return the start PFN of the + * next online section. If the PFN falls into an online section or if + * there is no next online section, return 0. 
+ */ +static unsigned long skip_offline_sections(unsigned long start_pfn) +{ + unsigned long start_nr = pfn_to_section_nr(start_pfn); + + if (online_section_nr(start_nr)) + return 0; + + while (++start_nr <= __highest_present_section_nr) { + if (online_section_nr(start_nr)) + return section_nr_to_pfn(start_nr); + } + + return 0; +} +#else +static unsigned long skip_offline_sections(unsigned long start_pfn) +{ + return 0; +} +#endif + /* * Compound pages of >= pageblock_order should consistently be skipped until * released. It is always pointless to compact pages of such order (if they are @@ -1955,8 +1982,14 @@ static isolate_migrate_t isolate_migratepages(struct compact_control *cc) page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn, cc->zone); - if (!page) + if (!page) { + unsigned long next_pfn; + + next_pfn = skip_offline_sections(block_start_pfn); + if (next_pfn) + block_end_pfn = min(next_pfn, cc->free_pfn); continue; + } /* * If isolation recently failed, do not retry. Only check the -- cgit v1.2.3 From 726ccdba1521007fab4b2b7565d255fa0f2b770c Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Sun, 28 May 2023 00:25:31 +0900 Subject: kasan,kmsan: remove __GFP_KSWAPD_RECLAIM usage from kasan/kmsan syzbot is reporting lockdep warning in __stack_depot_save(), for the caller of __stack_depot_save() (i.e. __kasan_record_aux_stack() in this report) is responsible for masking __GFP_KSWAPD_RECLAIM flag in order not to wake kswapd which in turn wakes kcompactd. Since kasan/kmsan functions might be called with arbitrary locks held, mask __GFP_KSWAPD_RECLAIM flag from all GFP_NOWAIT/GFP_ATOMIC allocations in kasan/kmsan. Note that kmsan_save_stack_with_flags() is changed to mask both __GFP_DIRECT_RECLAIM flag and __GFP_KSWAPD_RECLAIM flag, for wakeup_kswapd() from wake_all_kswapds() from __alloc_pages_slowpath() calls wakeup_kcompactd() if __GFP_KSWAPD_RECLAIM flag is set and __GFP_DIRECT_RECLAIM flag is not set. Link: https://lkml.kernel.org/r/656cb4f5-998b-c8d7-3c61-c2d37aa90f9a@I-love.SAKURA.ne.jp Signed-off-by: Tetsuo Handa Reported-by: syzbot Closes: https://syzkaller.appspot.com/bug?extid=ece2915262061d6e0ac1 Reviewed-by: "Huang, Ying" Reviewed-by: Alexander Potapenko Cc: Andrey Konovalov Cc: Andrey Ryabinin Cc: Dmitry Vyukov Cc: Marco Elver Cc: Mel Gorman Cc: Vincenzo Frascino Cc: Vlastimil Babka Signed-off-by: Andrew Morton --- mm/kasan/generic.c | 4 ++-- mm/kasan/tags.c | 2 +- mm/kmsan/core.c | 6 +++--- mm/kmsan/instrumentation.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c index 224d161a5a22..5b4c97baa656 100644 --- a/mm/kasan/generic.c +++ b/mm/kasan/generic.c @@ -488,7 +488,7 @@ static void __kasan_record_aux_stack(void *addr, bool can_alloc) return; alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0]; - alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT, can_alloc); + alloc_meta->aux_stack[0] = kasan_save_stack(0, can_alloc); } void kasan_record_aux_stack(void *addr) @@ -518,7 +518,7 @@ void kasan_save_free_info(struct kmem_cache *cache, void *object) if (!free_meta) return; - kasan_set_track(&free_meta->free_track, GFP_NOWAIT); + kasan_set_track(&free_meta->free_track, 0); /* The object was freed and has free track set. 
*/ *(u8 *)kasan_mem_to_shadow(object) = KASAN_SLAB_FREETRACK; } diff --git a/mm/kasan/tags.c b/mm/kasan/tags.c index 67a222586846..7dcfe341d48e 100644 --- a/mm/kasan/tags.c +++ b/mm/kasan/tags.c @@ -140,5 +140,5 @@ void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags) void kasan_save_free_info(struct kmem_cache *cache, void *object) { - save_stack_info(cache, object, GFP_NOWAIT, true); + save_stack_info(cache, object, 0, true); } diff --git a/mm/kmsan/core.c b/mm/kmsan/core.c index 7d1e4aa30bae..3adb4c1d3b19 100644 --- a/mm/kmsan/core.c +++ b/mm/kmsan/core.c @@ -74,7 +74,7 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags, nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0); /* Don't sleep. */ - flags &= ~__GFP_DIRECT_RECLAIM; + flags &= ~(__GFP_DIRECT_RECLAIM | __GFP_KSWAPD_RECLAIM); handle = __stack_depot_save(entries, nr_entries, flags, true); return stack_depot_set_extra_bits(handle, extra); @@ -245,7 +245,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id) extra_bits = kmsan_extra_bits(depth, uaf); entries[0] = KMSAN_CHAIN_MAGIC_ORIGIN; - entries[1] = kmsan_save_stack_with_flags(GFP_ATOMIC, 0); + entries[1] = kmsan_save_stack_with_flags(__GFP_HIGH, 0); entries[2] = id; /* * @entries is a local var in non-instrumented code, so KMSAN does not @@ -253,7 +253,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id) * positives when __stack_depot_save() passes it to instrumented code. */ kmsan_internal_unpoison_memory(entries, sizeof(entries), false); - handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC, + handle = __stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH, true); return stack_depot_set_extra_bits(handle, extra_bits); } diff --git a/mm/kmsan/instrumentation.c b/mm/kmsan/instrumentation.c index cf12e9616b24..cc3907a9c33a 100644 --- a/mm/kmsan/instrumentation.c +++ b/mm/kmsan/instrumentation.c @@ -282,7 +282,7 @@ void __msan_poison_alloca(void *address, uintptr_t size, char *descr) /* stack_depot_save() may allocate memory. */ kmsan_enter_runtime(); - handle = stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC); + handle = stack_depot_save(entries, ARRAY_SIZE(entries), __GFP_HIGH); kmsan_leave_runtime(); kmsan_internal_set_shadow_origin(address, size, -1, handle, -- cgit v1.2.3 From 91f0dccef141483f8399299c39ce9114d19bb147 Mon Sep 17 00:00:00 2001 From: Haifeng Xu Date: Mon, 19 Jun 2023 13:04:42 +0000 Subject: mm/memcontrol: do not tweak node in mem_cgroup_init() mem_cgroup_init() request for allocations from each possible node, and it's used to be a problem because NODE_DATA is not allocated for offline node. Things have already changed since commit 09f49dca570a9 ("mm: handle uninitialized numa nodes gracefully"), so it's unnecessary to check for !node_online nodes here. How to test? 
qemu-system-x86_64 \ -kernel vmlinux \ -initrd full.rootfs.cpio.gz \ -append "console=ttyS0,115200 root=/dev/ram0 nokaslr earlyprintk=serial oops=panic panic_on_warn" \ -drive format=qcow2,file=vm_disk.qcow2,media=disk,if=ide \ -enable-kvm \ -cpu host \ -m 8G,slots=2,maxmem=16G \ -smp cores=4,threads=1,sockets=2 \ -object memory-backend-ram,id=mem0,size=4G \ -object memory-backend-ram,id=mem1,size=4G \ -numa node,memdev=mem0,cpus=0-3,nodeid=0 \ -numa node,memdev=mem1,cpus=4-7,nodeid=1 \ -numa node,nodeid=2 \ -net nic,model=virtio,macaddr=52:54:00:12:34:58 \ -net user \ -nographic \ -rtc base=localtime \ -gdb tcp::6000 Guest state when booting: [ 0.048881] NUMA: Node 0 [mem 0x00000000-0x0009ffff] + [mem 0x00100000-0xbfffffff] -> [mem 0x00000000-0xbfffffff] [ 0.050489] NUMA: Node 0 [mem 0x00000000-0xbfffffff] + [mem 0x100000000-0x13fffffff] -> [mem 0x00000000-0x13fffffff] [ 0.052173] NODE_DATA(0) allocated [mem 0x13fffc000-0x13fffffff] [ 0.053164] NODE_DATA(1) allocated [mem 0x23fffa000-0x23fffdfff] [ 0.054187] Zone ranges: [ 0.054587] DMA [mem 0x0000000000001000-0x0000000000ffffff] [ 0.055551] DMA32 [mem 0x0000000001000000-0x00000000ffffffff] [ 0.056515] Normal [mem 0x0000000100000000-0x000000023fffffff] [ 0.057484] Movable zone start for each node [ 0.058149] Early memory node ranges [ 0.058705] node 0: [mem 0x0000000000001000-0x000000000009efff] [ 0.059679] node 0: [mem 0x0000000000100000-0x00000000bffdffff] [ 0.060659] node 0: [mem 0x0000000100000000-0x000000013fffffff] [ 0.061649] node 1: [mem 0x0000000140000000-0x000000023fffffff] [ 0.062638] Initmem setup node 0 [mem 0x0000000000001000-0x000000013fffffff] [ 0.063745] Initmem setup node 1 [mem 0x0000000140000000-0x000000023fffffff] [ 0.064855] DMA zone: 158 reserved pages exceeds freesize 0 [ 0.065746] Initializing node 2 as memoryless [ 0.066437] Initmem setup node 2 as memoryless [ 0.067132] DMA zone: 158 reserved pages exceeds freesize 0 [ 0.068037] On node 0, zone DMA: 1 pages in unavailable ranges [ 0.068265] On node 0, zone DMA: 97 pages in unavailable ranges [ 0.124755] On node 0, zone Normal: 32 pages in unavailable ranges cat /sys/devices/system/node/online 0-1 cat /sys/devices/system/node/possible 0-2 Link: https://lkml.kernel.org/r/20230619130442.2487-1-haifeng.xu@shopee.com Signed-off-by: Haifeng Xu Acked-by: Michal Hocko Cc: Johannes Weiner Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/memcontrol.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a834b1edcde9..e8ca4bdcb03c 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -7376,8 +7376,7 @@ static int __init mem_cgroup_init(void) for_each_node(node) { struct mem_cgroup_tree_per_node *rtpn; - rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, - node_online(node) ? node : NUMA_NO_NODE); + rtpn = kzalloc_node(sizeof(*rtpn), GFP_KERNEL, node); rtpn->rb_root = RB_ROOT; rtpn->rb_rightmost = NULL; -- cgit v1.2.3 From 0b52c420350e8f9873ba62768cd8246827184408 Mon Sep 17 00:00:00 2001 From: Jan Glauber Date: Mon, 19 Jun 2023 12:33:51 +0200 Subject: mm: fix shmem THP counters on migration The per node numa_stat values for shmem don't change on page migration for THP: grep shmem /sys/fs/cgroup/machine.slice/.../memory.numa_stat: shmem N0=1092616192 N1=10485760 shmem_thp N0=1092616192 N1=10485760 migratepages 9181 0 1: shmem N0=0 N1=1103101952 shmem_thp N0=1092616192 N1=10485760 Fix that by updating shmem_thp counters likewise to shmem counters on page migration. 
[jglauber@digitalocean.com: use folio_test_pmd_mappable instead of folio_test_transhuge] Link: https://lkml.kernel.org/r/20230622094720.510540-1-jglauber@digitalocean.com Link: https://lkml.kernel.org/r/20230619103351.234837-1-jglauber@digitalocean.com Signed-off-by: Jan Glauber Reviewed-by: Baolin Wang Cc: "Huang, Ying" Signed-off-by: Andrew Morton --- mm/migrate.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'mm') diff --git a/mm/migrate.c b/mm/migrate.c index ce35afdbc1e3..eca3bf0e93b8 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -486,6 +486,11 @@ int folio_migrate_mapping(struct address_space *mapping, if (folio_test_swapbacked(folio) && !folio_test_swapcache(folio)) { __mod_lruvec_state(old_lruvec, NR_SHMEM, -nr); __mod_lruvec_state(new_lruvec, NR_SHMEM, nr); + + if (folio_test_pmd_mappable(folio)) { + __mod_lruvec_state(old_lruvec, NR_SHMEM_THPS, -nr); + __mod_lruvec_state(new_lruvec, NR_SHMEM_THPS, nr); + } } #ifdef CONFIG_SWAP if (folio_test_swapcache(folio)) { -- cgit v1.2.3 From 9883c7f84053cec2826ca3c56254601b5ce9cdbe Mon Sep 17 00:00:00 2001 From: Jason Gunthorpe Date: Mon, 19 Jun 2023 15:27:25 -0300 Subject: mm/gup: do not return 0 from pin_user_pages_fast() for bad args These routines are not intended to return zero, the callers cannot do anything sane with a 0 return. They should return an error which means future calls to GUP will not succeed, or they should return some non-zero number of pinned pages which means GUP should be called again. If start + nr_pages overflows it should return -EOVERFLOW to signal the arguments are invalid. Syzkaller keeps tripping on this when fuzzing GUP arguments. Link: https://lkml.kernel.org/r/0-v1-3d5ed1f20d50+104-gup_overflow_jgg@nvidia.com Signed-off-by: Jason Gunthorpe Reported-by: syzbot+353c7be4964c6253f24a@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000094fdd05faa4d3a4@google.com Reviewed-by: John Hubbard Reviewed-by: Lorenzo Stoakes Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/gup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/gup.c b/mm/gup.c index ce14d4d28503..a8336b39d6b5 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -3080,7 +3080,7 @@ static int internal_get_user_pages_fast(unsigned long start, start = untagged_addr(start) & PAGE_MASK; len = nr_pages << PAGE_SHIFT; if (check_add_overflow(start, len, &end)) - return 0; + return -EOVERFLOW; if (end > TASK_SIZE_MAX) return -EFAULT; if (unlikely(!access_ok((void __user *)start, len))) -- cgit v1.2.3 From 61167ad5fecdeaa037f3df1ba354dddd5f66a1ed Mon Sep 17 00:00:00 2001 From: Yajun Deng Date: Mon, 19 Jun 2023 10:34:06 +0800 Subject: mm: pass nid to reserve_bootmem_region() early_pfn_to_nid() is called frequently in init_reserved_page(), it returns the node id of the PFN. These PFN are probably from the same memory region, they have the same node id. It's not necessary to call early_pfn_to_nid() for each PFN. Pass nid to reserve_bootmem_region() and drop the call to early_pfn_to_nid() in init_reserved_page(). Also, set nid on all reserved pages before doing this, as some reserved memory regions may not be set nid. The most beneficial function is memmap_init_reserved_pages() if CONFIG_DEFERRED_STRUCT_PAGE_INIT is enabled. The following data was tested on an x86 machine with 190GB of RAM. 
before: memmap_init_reserved_pages() 67ms after: memmap_init_reserved_pages() 20ms Link: https://lkml.kernel.org/r/20230619023406.424298-1-yajun.deng@linux.dev Signed-off-by: Yajun Deng Reviewed-by: Mike Rapoport (IBM) Signed-off-by: Andrew Morton --- include/linux/mm.h | 3 ++- mm/memblock.c | 31 +++++++++++++++++++++---------- mm/mm_init.c | 30 +++++++++++++++++------------- 3 files changed, 40 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/include/linux/mm.h b/include/linux/mm.h index cf43deb25553..9ecb8b9c07f6 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -2940,7 +2940,8 @@ extern unsigned long free_reserved_area(void *start, void *end, extern void adjust_managed_page_count(struct page *page, long count); -extern void reserve_bootmem_region(phys_addr_t start, phys_addr_t end); +extern void reserve_bootmem_region(phys_addr_t start, + phys_addr_t end, int nid); /* Free the reserved page into the buddy system, so it gets managed. */ static inline void free_reserved_page(struct page *page) diff --git a/mm/memblock.c b/mm/memblock.c index da4264528e1e..46739551d4d1 100644 --- a/mm/memblock.c +++ b/mm/memblock.c @@ -2082,19 +2082,30 @@ static void __init memmap_init_reserved_pages(void) { struct memblock_region *region; phys_addr_t start, end; - u64 i; + int nid; + + /* + * set nid on all reserved pages and also treat struct + * pages for the NOMAP regions as PageReserved + */ + for_each_mem_region(region) { + nid = memblock_get_region_node(region); + start = region->base; + end = start + region->size; + + if (memblock_is_nomap(region)) + reserve_bootmem_region(start, end, nid); + + memblock_set_node(start, end, &memblock.reserved, nid); + } /* initialize struct pages for the reserved regions */ - for_each_reserved_mem_range(i, &start, &end) - reserve_bootmem_region(start, end); + for_each_reserved_mem_region(region) { + nid = memblock_get_region_node(region); + start = region->base; + end = start + region->size; - /* and also treat struct pages for the NOMAP regions as PageReserved */ - for_each_mem_region(region) { - if (memblock_is_nomap(region)) { - start = region->base; - end = start + region->size; - reserve_bootmem_region(start, end); - } + reserve_bootmem_region(start, end, nid); } } diff --git a/mm/mm_init.c b/mm/mm_init.c index 122e9bf3fa73..7ffa609673ea 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -646,10 +646,8 @@ static inline void pgdat_set_deferred_range(pg_data_t *pgdat) } /* Returns true if the struct page for the pfn is initialised */ -static inline bool __meminit early_page_initialised(unsigned long pfn) +static inline bool __meminit early_page_initialised(unsigned long pfn, int nid) { - int nid = early_pfn_to_nid(pfn); - if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn) return false; @@ -695,15 +693,14 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static void __meminit init_reserved_page(unsigned long pfn) +static void __meminit init_reserved_page(unsigned long pfn, int nid) { pg_data_t *pgdat; - int nid, zid; + int zid; - if (early_page_initialised(pfn)) + if (early_page_initialised(pfn, nid)) return; - nid = early_pfn_to_nid(pfn); pgdat = NODE_DATA(nid); for (zid = 0; zid < MAX_NR_ZONES; zid++) { @@ -717,7 +714,7 @@ static void __meminit init_reserved_page(unsigned long pfn) #else static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {} -static inline bool early_page_initialised(unsigned long pfn) +static inline bool early_page_initialised(unsigned long pfn, int nid) { return 
true; } @@ -727,7 +724,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn) return false; } -static inline void init_reserved_page(unsigned long pfn) +static inline void init_reserved_page(unsigned long pfn, int nid) { } #endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ @@ -738,7 +735,8 @@ static inline void init_reserved_page(unsigned long pfn) * marks the pages PageReserved. The remaining valid pages are later * sent to the buddy page allocator. */ -void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) +void __meminit reserve_bootmem_region(phys_addr_t start, + phys_addr_t end, int nid) { unsigned long start_pfn = PFN_DOWN(start); unsigned long end_pfn = PFN_UP(end); @@ -747,7 +745,7 @@ void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end) if (pfn_valid(start_pfn)) { struct page *page = pfn_to_page(start_pfn); - init_reserved_page(start_pfn); + init_reserved_page(start_pfn, nid); /* Avoid false-positive PageTail() */ INIT_LIST_HEAD(&page->lru); @@ -2572,8 +2570,14 @@ void __init set_dma_reserve(unsigned long new_dma_reserve) void __init memblock_free_pages(struct page *page, unsigned long pfn, unsigned int order) { - if (!early_page_initialised(pfn)) - return; + + if (IS_ENABLED(CONFIG_DEFERRED_STRUCT_PAGE_INIT)) { + int nid = early_pfn_to_nid(pfn); + + if (!early_page_initialised(pfn, nid)) + return; + } + if (!kmsan_memblock_free_pages(page, order)) { /* KMSAN will take care of these pages. */ return; -- cgit v1.2.3 From 3fda49e89f1702df6bb6a2470076b1a7bf3a29de Mon Sep 17 00:00:00 2001 From: Hugh Dickins Date: Mon, 19 Jun 2023 23:50:00 -0700 Subject: mm/swapfile: delete outdated pte_offset_map() comment Delete a triply out-of-date comment from add_swap_count_continuation(): 1. vmalloc_to_page() changed from pte_offset_map() to pte_offset_kernel() 2. pte_offset_map() changed from using kmap_atomic() to kmap_local_page() 3. kmap_atomic() changed from using fixed FIX_KMAP addresses in 2.6.37. Link: https://lkml.kernel.org/r/9022632b-ba9d-8cb0-c25-4be9786481b5@google.com Signed-off-by: Hugh Dickins Signed-off-by: Andrew Morton --- mm/swapfile.c | 5 ----- 1 file changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/swapfile.c b/mm/swapfile.c index a6945c2e0d03..92ed7cba2268 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -3456,11 +3456,6 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) goto out; } - /* - * We are fortunate that although vmalloc_to_page uses pte_offset_map, - * no architecture is using highmem pages for kernel page tables: so it - * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. - */ head = vmalloc_to_page(si->swap_map + offset); offset &= ~PAGE_MASK; -- cgit v1.2.3 From b5665cf936bf3955fec18c09a6aa53c8a57ea8b7 Mon Sep 17 00:00:00 2001 From: Ivan Orlov Date: Tue, 20 Jun 2023 20:33:15 +0200 Subject: mm: backing-dev: make bdi_class a static const structure Now that the driver core allows for struct class to be in read-only memory, move the bdi_class structure to be declared at build time placing it into read-only memory, instead of having to be dynamically allocated at load time. 
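The conversion follows the usual driver-core pattern for such classes: replace the runtime class_create() with a build-time const object and register it explicitly. A minimal sketch, with example_class and example_dev_groups as made-up names:

	static const struct class example_class = {
		.name		= "example",
		.dev_groups	= example_dev_groups,
	};

	static int __init example_init(void)
	{
		return class_register(&example_class);
	}

The bdi conversion below has the same shape, with device creation switched to device_create(&bdi_class, ...).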
Link: https://lkml.kernel.org/r/20230620183314.682822-2-gregkh@linuxfoundation.org Signed-off-by: Ivan Orlov Signed-off-by: Greg Kroah-Hartman Suggested-by: Greg Kroah-Hartman Cc: Jens Axboe Cc: Christoph Hellwig Signed-off-by: Andrew Morton --- mm/backing-dev.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) (limited to 'mm') diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 7da9727fcdf3..3ffc3cfa7a14 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -20,7 +20,6 @@ struct backing_dev_info noop_backing_dev_info; EXPORT_SYMBOL_GPL(noop_backing_dev_info); -static struct class *bdi_class; static const char *bdi_unknown_name = "(unknown)"; /* @@ -345,13 +344,19 @@ static struct attribute *bdi_dev_attrs[] = { }; ATTRIBUTE_GROUPS(bdi_dev); +static const struct class bdi_class = { + .name = "bdi", + .dev_groups = bdi_dev_groups, +}; + static __init int bdi_class_init(void) { - bdi_class = class_create("bdi"); - if (IS_ERR(bdi_class)) - return PTR_ERR(bdi_class); + int ret; + + ret = class_register(&bdi_class); + if (ret) + return ret; - bdi_class->dev_groups = bdi_dev_groups; bdi_debug_init(); return 0; @@ -1001,7 +1006,7 @@ int bdi_register_va(struct backing_dev_info *bdi, const char *fmt, va_list args) return 0; vsnprintf(bdi->dev_name, sizeof(bdi->dev_name), fmt, args); - dev = device_create(bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); + dev = device_create(&bdi_class, NULL, MKDEV(0, 0), bdi, bdi->dev_name); if (IS_ERR(dev)) return PTR_ERR(dev); -- cgit v1.2.3 From 1bf61092bc90a9054d01cfdf35b42c1bf6fe47c7 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Wed, 21 Jun 2023 16:14:28 +0800 Subject: mm: page_alloc: use the correct type of list for free pages Commit bf75f200569d ("mm/page_alloc: add page->buddy_list and page->pcp_list") introduces page->buddy_list and page->pcp_list as a union with page->lru, but missed to change get_page_from_free_area() to use page->buddy_list to clarify the correct type of list for a free page. Link: https://lkml.kernel.org/r/7e7ab533247d40c0ea0373c18a6a48e5667f9e10.1687333557.git.baolin.wang@linux.alibaba.com Signed-off-by: Baolin Wang Acked-by: Mel Gorman Reviewed-by: David Hildenbrand Signed-off-by: Andrew Morton --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 5b8a9d610b72..158f24d186e6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -727,7 +727,7 @@ static inline struct page *get_page_from_free_area(struct free_area *area, int migratetype) { return list_first_entry_or_null(&area->free_list[migratetype], - struct page, lru); + struct page, buddy_list); } /* -- cgit v1.2.3 From e0b72c14d8dcc9477e580c261041dae86d4906fe Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:50 +0100 Subject: mm: remove check_move_unevictable_pages() All callers have now been converted to call check_move_unevictable_folios(). 
Link: https://lkml.kernel.org/r/20230621164557.3510324-7-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/swap.h | 1 - mm/vmscan.c | 17 ----------------- 2 files changed, 18 deletions(-) (limited to 'mm') diff --git a/include/linux/swap.h b/include/linux/swap.h index ce7e82cf787f..456546443f1f 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -439,7 +439,6 @@ static inline bool node_reclaim_enabled(void) } void check_move_unevictable_folios(struct folio_batch *fbatch); -void check_move_unevictable_pages(struct pagevec *pvec); extern void __meminit kswapd_run(int nid); extern void __meminit kswapd_stop(int nid); diff --git a/mm/vmscan.c b/mm/vmscan.c index 27f90896f789..049342b6317c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -8075,23 +8075,6 @@ int node_reclaim(struct pglist_data *pgdat, gfp_t gfp_mask, unsigned int order) } #endif -void check_move_unevictable_pages(struct pagevec *pvec) -{ - struct folio_batch fbatch; - unsigned i; - - folio_batch_init(&fbatch); - for (i = 0; i < pvec->nr; i++) { - struct page *page = pvec->pages[i]; - - if (PageTransTail(page)) - continue; - folio_batch_add(&fbatch, page_folio(page)); - } - check_move_unevictable_folios(&fbatch); -} -EXPORT_SYMBOL_GPL(check_move_unevictable_pages); - /** * check_move_unevictable_folios - Move evictable folios to appropriate zone * lru list -- cgit v1.2.3 From 1e0877d58b1e22517d8939b22b963c043e6c63fd Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:54 +0100 Subject: mm: remove struct pagevec All users are now converted to use the folio_batch so we can get rid of this data structure. Link: https://lkml.kernel.org/r/20230621164557.3510324-11-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- include/linux/pagevec.h | 63 ++++--------------------------------------------- mm/swap.c | 18 +++++++------- 2 files changed, 13 insertions(+), 68 deletions(-) (limited to 'mm') diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 3a9d29dd28a3..87cc678adc85 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h @@ -3,65 +3,18 @@ * include/linux/pagevec.h * * In many places it is efficient to batch an operation up against multiple - * pages. A pagevec is a multipage container which is used for that. + * folios. A folio_batch is a container which is used for that. */ #ifndef _LINUX_PAGEVEC_H #define _LINUX_PAGEVEC_H -#include +#include -/* 15 pointers + header align the pagevec structure to a power of two */ +/* 15 pointers + header align the folio_batch structure to a power of two */ #define PAGEVEC_SIZE 15 -struct page; struct folio; -struct address_space; - -/* Layout must match folio_batch */ -struct pagevec { - unsigned char nr; - bool percpu_pvec_drained; - struct page *pages[PAGEVEC_SIZE]; -}; - -void __pagevec_release(struct pagevec *pvec); - -static inline void pagevec_init(struct pagevec *pvec) -{ - pvec->nr = 0; - pvec->percpu_pvec_drained = false; -} - -static inline void pagevec_reinit(struct pagevec *pvec) -{ - pvec->nr = 0; -} - -static inline unsigned pagevec_count(struct pagevec *pvec) -{ - return pvec->nr; -} - -static inline unsigned pagevec_space(struct pagevec *pvec) -{ - return PAGEVEC_SIZE - pvec->nr; -} - -/* - * Add a page to a pagevec. Returns the number of slots still available. 
- */ -static inline unsigned pagevec_add(struct pagevec *pvec, struct page *page) -{ - pvec->pages[pvec->nr++] = page; - return pagevec_space(pvec); -} - -static inline void pagevec_release(struct pagevec *pvec) -{ - if (pagevec_count(pvec)) - __pagevec_release(pvec); -} /** * struct folio_batch - A collection of folios. @@ -78,11 +31,6 @@ struct folio_batch { struct folio *folios[PAGEVEC_SIZE]; }; -/* Layout must match pagevec */ -static_assert(sizeof(struct pagevec) == sizeof(struct folio_batch)); -static_assert(offsetof(struct pagevec, pages) == - offsetof(struct folio_batch, folios)); - /** * folio_batch_init() - Initialise a batch of folios * @fbatch: The folio batch. @@ -127,10 +75,7 @@ static inline unsigned folio_batch_add(struct folio_batch *fbatch, return folio_batch_space(fbatch); } -static inline void __folio_batch_release(struct folio_batch *fbatch) -{ - __pagevec_release((struct pagevec *)fbatch); -} +void __folio_batch_release(struct folio_batch *pvec); static inline void folio_batch_release(struct folio_batch *fbatch) { diff --git a/mm/swap.c b/mm/swap.c index 423199ee8478..10348c1cf9c5 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -1044,25 +1044,25 @@ void release_pages(release_pages_arg arg, int nr) EXPORT_SYMBOL(release_pages); /* - * The pages which we're about to release may be in the deferred lru-addition + * The folios which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's - * OK from a correctness point of view but is inefficient - those pages may be + * OK from a correctness point of view but is inefficient - those folios may be * cache-warm and we want to give them back to the page allocator ASAP. * - * So __pagevec_release() will drain those queues here. + * So __folio_batch_release() will drain those queues here. * folio_batch_move_lru() calls folios_put() directly to avoid * mutual recursion. */ -void __pagevec_release(struct pagevec *pvec) +void __folio_batch_release(struct folio_batch *fbatch) { - if (!pvec->percpu_pvec_drained) { + if (!fbatch->percpu_pvec_drained) { lru_add_drain(); - pvec->percpu_pvec_drained = true; + fbatch->percpu_pvec_drained = true; } - release_pages(pvec->pages, pagevec_count(pvec)); - pagevec_reinit(pvec); + release_pages(fbatch->folios, folio_batch_count(fbatch)); + folio_batch_reinit(fbatch); } -EXPORT_SYMBOL(__pagevec_release); +EXPORT_SYMBOL(__folio_batch_release); /** * folio_batch_remove_exceptionals() - Prune non-folios from a batch. -- cgit v1.2.3 From 1a0fc811f5f5addf54499826bd1b6e34e917491c Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:55 +0100 Subject: mm: rename invalidate_mapping_pagevec to mapping_try_invalidate We don't use pagevecs for the LRU cache any more, and we don't know that the failed invalidations were due to the folio being in an LRU cache. So rename it to be more accurate. 
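A converted call site then reads roughly as in the fadvise user below (sketch only):

	unsigned long nr_failed = 0;

	mapping_try_invalidate(mapping, start_index, end_index, &nr_failed);
	if (nr_failed) {
		/* folios were likely held in remote CPUs' LRU caches */
		lru_add_drain_all();
		invalidate_mapping_pages(mapping, start_index, end_index);
	}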
Link: https://lkml.kernel.org/r/20230621164557.3510324-12-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/fadvise.c | 16 +++++++--------- mm/internal.h | 4 ++-- mm/truncate.c | 25 ++++++++++++------------- 3 files changed, 21 insertions(+), 24 deletions(-) (limited to 'mm') diff --git a/mm/fadvise.c b/mm/fadvise.c index fb7c5f43fd2a..f684ffd7f9c9 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -143,7 +143,7 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) } if (end_index >= start_index) { - unsigned long nr_pagevec = 0; + unsigned long nr_failed = 0; /* * It's common to FADV_DONTNEED right after @@ -156,17 +156,15 @@ int generic_fadvise(struct file *file, loff_t offset, loff_t len, int advice) */ lru_add_drain(); - invalidate_mapping_pagevec(mapping, - start_index, end_index, - &nr_pagevec); + mapping_try_invalidate(mapping, start_index, end_index, + &nr_failed); /* - * If fewer pages were invalidated than expected then - * it is possible that some of the pages were on - * a per-cpu pagevec for a remote CPU. Drain all - * pagevecs and try again. + * The failures may be due to the folio being + * in the LRU cache of a remote CPU. Drain all + * caches and try again. */ - if (nr_pagevec) { + if (nr_failed) { lru_add_drain_all(); invalidate_mapping_pages(mapping, start_index, end_index); diff --git a/mm/internal.h b/mm/internal.h index b0d8778dd910..a19255615bf2 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -133,8 +133,8 @@ int truncate_inode_folio(struct address_space *mapping, struct folio *folio); bool truncate_inode_partial_folio(struct folio *folio, loff_t start, loff_t end); long invalidate_inode_page(struct page *page); -unsigned long invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, unsigned long *nr_pagevec); +unsigned long mapping_try_invalidate(struct address_space *mapping, + pgoff_t start, pgoff_t end, unsigned long *nr_failed); /** * folio_evictable - Test whether a folio is evictable. diff --git a/mm/truncate.c b/mm/truncate.c index 86de31ed4d32..4a917570887f 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -486,18 +486,17 @@ void truncate_inode_pages_final(struct address_space *mapping) EXPORT_SYMBOL(truncate_inode_pages_final); /** - * invalidate_mapping_pagevec - Invalidate all the unlocked pages of one inode - * @mapping: the address_space which holds the pages to invalidate + * mapping_try_invalidate - Invalidate all the evictable folios of one inode + * @mapping: the address_space which holds the folios to invalidate * @start: the offset 'from' which to invalidate * @end: the offset 'to' which to invalidate (inclusive) - * @nr_pagevec: invalidate failed page number for caller + * @nr_failed: How many folio invalidations failed * - * This helper is similar to invalidate_mapping_pages(), except that it accounts - * for pages that are likely on a pagevec and counts them in @nr_pagevec, which - * will be used by the caller. + * This function is similar to invalidate_mapping_pages(), except that it + * returns the number of folios which could not be evicted in @nr_failed. 
*/ -unsigned long invalidate_mapping_pagevec(struct address_space *mapping, - pgoff_t start, pgoff_t end, unsigned long *nr_pagevec) +unsigned long mapping_try_invalidate(struct address_space *mapping, + pgoff_t start, pgoff_t end, unsigned long *nr_failed) { pgoff_t indices[PAGEVEC_SIZE]; struct folio_batch fbatch; @@ -527,9 +526,9 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping, */ if (!ret) { deactivate_file_folio(folio); - /* It is likely on the pagevec of a remote CPU */ - if (nr_pagevec) - (*nr_pagevec)++; + /* Likely in the lru cache of a remote CPU */ + if (nr_failed) + (*nr_failed)++; } count += ret; } @@ -552,12 +551,12 @@ unsigned long invalidate_mapping_pagevec(struct address_space *mapping, * If you want to remove all the pages of one inode, regardless of * their use and writeback state, use truncate_inode_pages(). * - * Return: the number of the cache entries that were invalidated + * Return: The number of indices that had their contents invalidated */ unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end) { - return invalidate_mapping_pagevec(mapping, start, end, NULL); + return mapping_try_invalidate(mapping, start, end, NULL); } EXPORT_SYMBOL(invalidate_mapping_pages); -- cgit v1.2.3 From 1fec6890bf2247ecc93f5491c2d3f33c333d5c6e Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:56 +0100 Subject: mm: remove references to pagevec Most of these should just refer to the LRU cache rather than the data structure used to implement the LRU cache. Link: https://lkml.kernel.org/r/20230621164557.3510324-13-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/huge_memory.c | 2 +- mm/khugepaged.c | 6 +++--- mm/ksm.c | 6 +++--- mm/memory.c | 6 +++--- mm/migrate_device.c | 2 +- mm/swap.c | 2 +- mm/truncate.c | 2 +- 7 files changed, 13 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index e94fe292f30a..eb3678360b97 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1344,7 +1344,7 @@ vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf) /* * See do_wp_page(): we can only reuse the folio exclusively if * there are no additional references. Note that we always drain - * the LRU pagevecs immediately after adding a THP. + * the LRU cache immediately after adding a THP. 
*/ if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio) * folio_nr_pages(folio)) diff --git a/mm/khugepaged.c b/mm/khugepaged.c index 5ef1e08b2a06..3beb4ad2ee5e 100644 --- a/mm/khugepaged.c +++ b/mm/khugepaged.c @@ -1051,7 +1051,7 @@ static int __collapse_huge_page_swapin(struct mm_struct *mm, if (pte) pte_unmap(pte); - /* Drain LRU add pagevec to remove extra pin on the swapped in pages */ + /* Drain LRU cache to remove extra pin on the swapped in pages */ if (swapped_in) lru_add_drain(); @@ -1972,7 +1972,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, result = SCAN_FAIL; goto xa_unlocked; } - /* drain pagevecs to help isolate_lru_page() */ + /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = folio_file_page(folio, index); } else if (trylock_page(page)) { @@ -1988,7 +1988,7 @@ static int collapse_file(struct mm_struct *mm, unsigned long addr, page_cache_sync_readahead(mapping, &file->f_ra, file, index, end - index); - /* drain pagevecs to help isolate_lru_page() */ + /* drain lru cache to help isolate_lru_page() */ lru_add_drain(); page = find_lock_page(mapping, index); if (unlikely(page == NULL)) { diff --git a/mm/ksm.c b/mm/ksm.c index d995779dc1fe..ba266359da55 100644 --- a/mm/ksm.c +++ b/mm/ksm.c @@ -932,7 +932,7 @@ static int remove_stable_node(struct ksm_stable_node *stable_node) * The stable node did not yet appear stale to get_ksm_page(), * since that allows for an unmapped ksm page to be recognized * right up until it is freed; but the node is safe to remove. - * This page might be in a pagevec waiting to be freed, + * This page might be in an LRU cache waiting to be freed, * or it might be PageSwapCache (perhaps under writeback), * or it might have been removed from swapcache a moment ago. */ @@ -2303,8 +2303,8 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page) trace_ksm_start_scan(ksm_scan.seqnr, ksm_rmap_items); /* - * A number of pages can hang around indefinitely on per-cpu - * pagevecs, raised page count preventing write_protect_page + * A number of pages can hang around indefinitely in per-cpu + * LRU cache, raised page count preventing write_protect_page * from merging them. Though it doesn't really matter much, * it is puzzling to see some stuck in pages_volatile until * other activity jostles them out, and they also prevented diff --git a/mm/memory.c b/mm/memory.c index 3d78b552866d..f758f59f3704 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -3401,8 +3401,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) goto copy; if (!folio_test_lru(folio)) /* - * Note: We cannot easily detect+handle references from - * remote LRU pagevecs or references to LRU folios. + * We cannot easily detect+handle references from + * remote LRU caches or references to LRU folios. */ lru_add_drain(); if (folio_ref_count(folio) > 1 + folio_test_swapcache(folio)) @@ -3880,7 +3880,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf) * If we want to map a page that's in the swapcache writable, we * have to detect via the refcount if we're really the exclusive * owner. Try removing the extra reference from the local LRU - * pagevecs if required. + * caches if required. 
*/ if ((vmf->flags & FAULT_FLAG_WRITE) && folio == swapcache && !folio_test_ksm(folio) && !folio_test_lru(folio)) diff --git a/mm/migrate_device.c b/mm/migrate_device.c index 02d272b909b5..8365158460ed 100644 --- a/mm/migrate_device.c +++ b/mm/migrate_device.c @@ -376,7 +376,7 @@ static unsigned long migrate_device_unmap(unsigned long *src_pfns, /* ZONE_DEVICE pages are not on LRU */ if (!is_zone_device_page(page)) { if (!PageLRU(page) && allow_drain) { - /* Drain CPU's pagevec */ + /* Drain CPU's lru cache */ lru_add_drain_all(); allow_drain = false; } diff --git a/mm/swap.c b/mm/swap.c index 10348c1cf9c5..cd8f0150ba3a 100644 --- a/mm/swap.c +++ b/mm/swap.c @@ -76,7 +76,7 @@ static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = { /* * This path almost never happens for VM activity - pages are normally freed - * via pagevecs. But it gets used by networking - and for compound pages. + * in batches. But it gets used by networking - and for compound pages. */ static void __page_cache_release(struct folio *folio) { diff --git a/mm/truncate.c b/mm/truncate.c index 4a917570887f..95d1291d269b 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -565,7 +565,7 @@ EXPORT_SYMBOL(invalidate_mapping_pages); * refcount. We do this because invalidate_inode_pages2() needs stronger * invalidation guarantees, and cannot afford to leave pages behind because * shrink_page_list() has a temp ref on them, or because they're transiently - * sitting in the folio_add_lru() pagevecs. + * sitting in the folio_add_lru() caches. */ static int invalidate_complete_folio2(struct address_space *mapping, struct folio *folio) -- cgit v1.2.3 From 994ec4e29b3de188d11fe60d17403285fcc8917a Mon Sep 17 00:00:00 2001 From: "Matthew Wilcox (Oracle)" Date: Wed, 21 Jun 2023 17:45:57 +0100 Subject: mm: remove unnecessary pagevec includes These files no longer need pagevec.h, mostly due to function declarations being moved out of it. 
Link: https://lkml.kernel.org/r/20230621164557.3510324-14-willy@infradead.org Signed-off-by: Matthew Wilcox (Oracle) Signed-off-by: Andrew Morton --- mm/fadvise.c | 1 - mm/memory_hotplug.c | 1 - mm/migrate.c | 1 - mm/readahead.c | 1 - mm/swap_state.c | 1 - 5 files changed, 5 deletions(-) (limited to 'mm') diff --git a/mm/fadvise.c b/mm/fadvise.c index f684ffd7f9c9..6c39d42f16dc 100644 --- a/mm/fadvise.c +++ b/mm/fadvise.c @@ -14,7 +14,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 35db4108bb15..3f231cf1b410 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -13,7 +13,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/migrate.c b/mm/migrate.c index eca3bf0e93b8..24baad2571e3 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -21,7 +21,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/readahead.c b/mm/readahead.c index 47afbca1d122..a9c999aa19af 100644 --- a/mm/readahead.c +++ b/mm/readahead.c @@ -120,7 +120,6 @@ #include #include #include -#include #include #include #include diff --git a/mm/swap_state.c b/mm/swap_state.c index 4a5c7b748051..f8ea7015bad4 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include #include -- cgit v1.2.3 From 18a937076c6991a21b88d99af95b779b5027b29b Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 09:30:09 +0000 Subject: mm: zswap: fix double invalidate with exclusive loads If exclusive loads are enabled for zswap, we invalidate the entry before returning from zswap_frontswap_load(), after dropping the local reference. However, the tree lock is dropped during decompression after the local reference is acquired, so the entry could be invalidated before we drop the local ref. If this happens, the entry is freed once we drop the local ref, and zswap_invalidate_entry() tries to invalidate an already freed entry. Fix this by: (a) Making sure zswap_invalidate_entry() is always called with a local ref held, to avoid being called on a freed entry. (b) Making sure zswap_invalidate_entry() only drops the ref if the entry was actually on the rbtree. Otherwise, another invalidation could have already happened, and the initial ref is already dropped. With these changes, there is no need to check that the entry still exists in the tree in zswap_reclaim_entry() before invalidating it, as zswap_invalidate_entry() will make this check internally.
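As an aside, here is a minimal userspace sketch of that rule (all names below are invented stand-ins, not the zswap API): dropping the creation reference only when the entry was actually erased from the tree turns a racing second invalidation into a harmless no-op. The load-path hunk below similarly keeps the local reference held until after the invalidation.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	int refcount;		/* one ref from creation plus one per local user */
	bool on_tree;		/* stand-in for membership in tree->rbroot */
};

static struct entry *entry_create(void)
{
	struct entry *e = calloc(1, sizeof(*e));

	e->refcount = 1;	/* the initial ref owned by the tree */
	e->on_tree = true;
	return e;
}

/* stand-in for zswap_entry_put(): frees on the last reference */
static void entry_put(struct entry *e)
{
	if (--e->refcount == 0) {
		printf("entry freed\n");
		free(e);
	}
}

/* stand-in for zswap_rb_erase(): reports whether the entry was still there */
static bool tree_erase(struct entry *e)
{
	if (!e->on_tree)
		return false;
	e->on_tree = false;
	return true;
}

/* stand-in for zswap_invalidate_entry(): drop the creation ref only if the
 * erase actually happened, so a racing second call cannot double-drop it */
static void invalidate(struct entry *e)
{
	if (tree_erase(e))
		entry_put(e);
}

int main(void)
{
	struct entry *e = entry_create();

	e->refcount++;		/* local ref, as taken under the tree lock */
	invalidate(e);		/* first invalidation drops the creation ref */
	invalidate(e);		/* racing invalidation is now a no-op */
	entry_put(e);		/* dropping the local ref frees the entry */
	return 0;
}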
Link: https://lkml.kernel.org/r/20230621093009.637544-1-yosryahmed@google.com Fixes: b9c91c43412f ("mm: zswap: support exclusive loads") Signed-off-by: Yosry Ahmed Reported-by: Hyeonggon Yoo <42.hyeyoo@gmail.com> Cc: Dan Streetman Cc: Domenico Cerasuolo Cc: Johannes Weiner Cc: Konrad Rzeszutek Wilk Cc: Nhat Pham Cc: Seth Jennings Cc: Vitaly Wool Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/zswap.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'mm') diff --git a/mm/zswap.c b/mm/zswap.c index 87b204233115..62195f72bf56 100644 --- a/mm/zswap.c +++ b/mm/zswap.c @@ -355,12 +355,14 @@ static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry, return 0; } -static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) +static bool zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry) { if (!RB_EMPTY_NODE(&entry->rbnode)) { rb_erase(&entry->rbnode, root); RB_CLEAR_NODE(&entry->rbnode); + return true; } + return false; } /* @@ -599,14 +601,16 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor) return NULL; } +/* + * If the entry is still valid in the tree, drop the initial ref and remove it + * from the tree. This function must be called with an additional ref held, + * otherwise it may race with another invalidation freeing the entry. + */ static void zswap_invalidate_entry(struct zswap_tree *tree, struct zswap_entry *entry) { - /* remove from rbtree */ - zswap_rb_erase(&tree->rbroot, entry); - - /* drop the initial reference from entry creation */ - zswap_entry_put(tree, entry); + if (zswap_rb_erase(&tree->rbroot, entry)) + zswap_entry_put(tree, entry); } static int zswap_reclaim_entry(struct zswap_pool *pool) @@ -659,8 +663,7 @@ static int zswap_reclaim_entry(struct zswap_pool *pool) * swapcache. Drop the entry from zswap - unless invalidate already * took it out while we had the tree->lock released for IO. */ - if (entry == zswap_rb_search(&tree->rbroot, swpoffset)) - zswap_invalidate_entry(tree, entry); + zswap_invalidate_entry(tree, entry); put_unlock: /* Drop local reference */ @@ -1466,7 +1469,6 @@ stats: count_objcg_event(entry->objcg, ZSWPIN); freeentry: spin_lock(&tree->lock); - zswap_entry_put(tree, entry); if (!ret && zswap_exclusive_loads_enabled) { zswap_invalidate_entry(tree, entry); *exclusive = true; @@ -1475,6 +1477,7 @@ freeentry: list_move(&entry->lru, &entry->pool->lru); spin_unlock(&entry->pool->lru_lock); } + zswap_entry_put(tree, entry); spin_unlock(&tree->lock); return ret; -- cgit v1.2.3 From 56ae0bb349b4eeb172674d4876f2b6290d505a25 Mon Sep 17 00:00:00 2001 From: Kefeng Wang Date: Mon, 19 Jun 2023 19:07:17 +0800 Subject: mm: compaction: convert to use a folio in isolate_migratepages_block() Directly use a folio instead of page_folio() when page successfully isolated (hugepage and movable page) and after folio_get_nontail_page(), which removes several calls to compound_head(). 
Link: https://lkml.kernel.org/r/20230619110718.65679-1-wangkefeng.wang@huawei.com Signed-off-by: Kefeng Wang Cc: Baolin Wang Cc: James Gowans Cc: Matthew Wilcox Cc: Yu Zhao Signed-off-by: Andrew Morton --- mm/compaction.c | 84 ++++++++++++++++++++++++++++++--------------------------- 1 file changed, 44 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 0fb3b89b3967..dbc9f86b1934 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -822,6 +822,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, struct lruvec *lruvec; unsigned long flags = 0; struct lruvec *locked = NULL; + struct folio *folio = NULL; struct page *page = NULL, *valid_page = NULL; struct address_space *mapping; unsigned long start_pfn = low_pfn; @@ -918,7 +919,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, if (!valid_page && pageblock_aligned(low_pfn)) { if (!isolation_suitable(cc, page)) { low_pfn = end_pfn; - page = NULL; + folio = NULL; goto isolate_abort; } valid_page = page; @@ -950,7 +951,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * Hugepage was successfully isolated and placed * on the cc->migratepages list. */ - low_pfn += compound_nr(page) - 1; + folio = page_folio(page); + low_pfn += folio_nr_pages(folio) - 1; goto isolate_success_no_list; } @@ -1018,8 +1020,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, locked = NULL; } - if (isolate_movable_page(page, mode)) + if (isolate_movable_page(page, mode)) { + folio = page_folio(page); goto isolate_success; + } } goto isolate_fail; @@ -1030,7 +1034,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * sure the page is not being freed elsewhere -- the * page release code relies on it. */ - if (unlikely(!get_page_unless_zero(page))) + folio = folio_get_nontail_page(page); + if (unlikely(!folio)) goto isolate_fail; /* @@ -1038,8 +1043,8 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * so avoid taking lru_lock and isolating it unnecessarily in an * admittedly racy check. */ - mapping = page_mapping(page); - if (!mapping && (page_count(page) - 1) > total_mapcount(page)) + mapping = folio_mapping(folio); + if (!mapping && (folio_ref_count(folio) - 1) > folio_mapcount(folio)) goto isolate_fail_put; /* @@ -1050,11 +1055,11 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, goto isolate_fail_put; /* Only take pages on LRU: a check now makes later tests safe */ - if (!PageLRU(page)) + if (!folio_test_lru(folio)) goto isolate_fail_put; /* Compaction might skip unevictable pages but CMA takes them */ - if (!(mode & ISOLATE_UNEVICTABLE) && PageUnevictable(page)) + if (!(mode & ISOLATE_UNEVICTABLE) && folio_test_unevictable(folio)) goto isolate_fail_put; /* @@ -1063,10 +1068,10 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * it will be able to migrate without blocking - clean pages * for the most part. PageWriteback would require blocking. 
*/ - if ((mode & ISOLATE_ASYNC_MIGRATE) && PageWriteback(page)) + if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_writeback(folio)) goto isolate_fail_put; - if ((mode & ISOLATE_ASYNC_MIGRATE) && PageDirty(page)) { + if ((mode & ISOLATE_ASYNC_MIGRATE) && folio_test_dirty(folio)) { bool migrate_dirty; /* @@ -1078,22 +1083,22 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, * the page lock until after the page is removed * from the page cache. */ - if (!trylock_page(page)) + if (!folio_trylock(folio)) goto isolate_fail_put; - mapping = page_mapping(page); + mapping = folio_mapping(folio); migrate_dirty = !mapping || mapping->a_ops->migrate_folio; - unlock_page(page); + folio_unlock(folio); if (!migrate_dirty) goto isolate_fail_put; } - /* Try isolate the page */ - if (!TestClearPageLRU(page)) + /* Try isolate the folio */ + if (!folio_test_clear_lru(folio)) goto isolate_fail_put; - lruvec = folio_lruvec(page_folio(page)); + lruvec = folio_lruvec(folio); /* If we already hold the lock, we can skip some rechecking */ if (lruvec != locked) { @@ -1103,7 +1108,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, compact_lock_irqsave(&lruvec->lru_lock, &flags, cc); locked = lruvec; - lruvec_memcg_debug(lruvec, page_folio(page)); + lruvec_memcg_debug(lruvec, folio); /* * Try get exclusive access under lock. If marked for @@ -1119,34 +1124,33 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, } /* - * Page become compound since the non-locked check, - * and it's on LRU. It can only be a THP so the order - * is safe to read and it's 0 for tail pages. + * folio become large since the non-locked check, + * and it's on LRU. */ - if (unlikely(PageCompound(page) && !cc->alloc_contig)) { - low_pfn += compound_nr(page) - 1; - nr_scanned += compound_nr(page) - 1; - SetPageLRU(page); + if (unlikely(folio_test_large(folio) && !cc->alloc_contig)) { + low_pfn += folio_nr_pages(folio) - 1; + nr_scanned += folio_nr_pages(folio) - 1; + folio_set_lru(folio); goto isolate_fail_put; } } - /* The whole page is taken off the LRU; skip the tail pages. 
*/ - if (PageCompound(page)) - low_pfn += compound_nr(page) - 1; + /* The folio is taken off the LRU */ + if (folio_test_large(folio)) + low_pfn += folio_nr_pages(folio) - 1; /* Successfully isolated */ - del_page_from_lru_list(page, lruvec); - mod_node_page_state(page_pgdat(page), - NR_ISOLATED_ANON + page_is_file_lru(page), - thp_nr_pages(page)); + lruvec_del_folio(lruvec, folio); + node_stat_mod_folio(folio, + NR_ISOLATED_ANON + folio_is_file_lru(folio), + folio_nr_pages(folio)); isolate_success: - list_add(&page->lru, &cc->migratepages); + list_add(&folio->lru, &cc->migratepages); isolate_success_no_list: - cc->nr_migratepages += compound_nr(page); - nr_isolated += compound_nr(page); - nr_scanned += compound_nr(page) - 1; + cc->nr_migratepages += folio_nr_pages(folio); + nr_isolated += folio_nr_pages(folio); + nr_scanned += folio_nr_pages(folio) - 1; /* * Avoid isolating too much unless this block is being @@ -1168,7 +1172,7 @@ isolate_fail_put: unlock_page_lruvec_irqrestore(locked, flags); locked = NULL; } - put_page(page); + folio_put(folio); isolate_fail: if (!skip_on_failure && ret != -ENOMEM) @@ -1209,14 +1213,14 @@ isolate_fail: if (unlikely(low_pfn > end_pfn)) low_pfn = end_pfn; - page = NULL; + folio = NULL; isolate_abort: if (locked) unlock_page_lruvec_irqrestore(locked, flags); - if (page) { - SetPageLRU(page); - put_page(page); + if (folio) { + folio_set_lru(folio); + folio_put(folio); } /* -- cgit v1.2.3 From 7a704474b3022dabbb68f72bf18a3d89ec1c0a24 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 02:30:53 +0000 Subject: mm: memcg: rename and document global_reclaim() Evidently, global_reclaim() can be a confusing name. Especially that it used to exist before with a subtly different definition (removed by commit b5ead35e7e1d ("mm: vmscan: naming fixes: global_reclaim() and sane_reclaim()"). It can be interpreted as non-cgroup reclaim, even though it returns true for cgroup reclaim on the root memcg (through memory.reclaim). Rename it to root_reclaim() in an attempt to make it less ambiguous, and add documentation to it as well as cgroup_reclaim. Link: https://lkml.kernel.org/r/20230621023053.432374-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reported-by: Johannes Weiner Closes: https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Acked-by: Yu Zhao Cc: Michal Hocko Cc: Muchun Song Cc: Roman Gushchin Cc: Shakeel Butt Signed-off-by: Andrew Morton --- mm/vmscan.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 049342b6317c..b7068be8a034 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -429,12 +429,17 @@ void reparent_shrinker_deferred(struct mem_cgroup *memcg) up_read(&shrinker_rwsem); } +/* Returns true for reclaim through cgroup limits or cgroup interfaces. */ static bool cgroup_reclaim(struct scan_control *sc) { return sc->target_mem_cgroup; } -static bool global_reclaim(struct scan_control *sc) +/* + * Returns true for reclaim on the root cgroup. This is true for direct + * allocator reclaim and reclaim through cgroup interfaces on the root cgroup. 
+ */ +static bool root_reclaim(struct scan_control *sc) { return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); } @@ -489,7 +494,7 @@ static bool cgroup_reclaim(struct scan_control *sc) return false; } -static bool global_reclaim(struct scan_control *sc) +static bool root_reclaim(struct scan_control *sc) { return true; } @@ -546,7 +551,7 @@ static void flush_reclaim_state(struct scan_control *sc) * memcg reclaim, to make reporting more accurate and reduce * underestimation, but it's probably not worth the complexity for now. */ - if (current->reclaim_state && global_reclaim(sc)) { + if (current->reclaim_state && root_reclaim(sc)) { sc->nr_reclaimed += current->reclaim_state->reclaimed; current->reclaim_state->reclaimed = 0; } @@ -5325,7 +5330,7 @@ static long get_nr_to_scan(struct lruvec *lruvec, struct scan_control *sc, bool static unsigned long get_nr_to_reclaim(struct scan_control *sc) { /* don't abort memcg reclaim to ensure fairness */ - if (!global_reclaim(sc)) + if (!root_reclaim(sc)) return -1; return max(sc->nr_to_reclaim, compact_gap(sc->order)); @@ -5477,7 +5482,7 @@ static void lru_gen_shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc { struct blk_plug plug; - VM_WARN_ON_ONCE(global_reclaim(sc)); + VM_WARN_ON_ONCE(root_reclaim(sc)); VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); lru_add_drain(); @@ -5538,7 +5543,7 @@ static void lru_gen_shrink_node(struct pglist_data *pgdat, struct scan_control * struct blk_plug plug; unsigned long reclaimed = sc->nr_reclaimed; - VM_WARN_ON_ONCE(!global_reclaim(sc)); + VM_WARN_ON_ONCE(!root_reclaim(sc)); /* * Unmapped clean folios are already prioritized. Scanning for more of @@ -6260,7 +6265,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) bool proportional_reclaim; struct blk_plug plug; - if (lru_gen_enabled() && !global_reclaim(sc)) { + if (lru_gen_enabled() && !root_reclaim(sc)) { lru_gen_shrink_lruvec(lruvec, sc); return; } @@ -6501,7 +6506,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc) struct lruvec *target_lruvec; bool reclaimable = false; - if (lru_gen_enabled() && global_reclaim(sc)) { + if (lru_gen_enabled() && root_reclaim(sc)) { lru_gen_shrink_node(pgdat, sc); return; } -- cgit v1.2.3 From 1bc545bff45ce9eefc176ccf663074462a209cb6 Mon Sep 17 00:00:00 2001 From: Yosry Ahmed Date: Wed, 21 Jun 2023 02:31:01 +0000 Subject: mm/vmscan: fix root proactive reclaim unthrottling unbalanced node When memory.reclaim was introduced, it became the first case where cgroup_reclaim() is true for the root cgroup. Johannes concluded [1] that for most cases this is okay, except for one case. Historically, kswapd would throttle reclaim on a node if a lot of pages marked for reclaim are under writeback (aka the node is congested). This occurred by setting LRUVEC_CONGESTED bit in lruvec->flags. The bit would be cleared when the node is balanced. Similarly, cgroup reclaim would set the same bit when an lruvec is congested, and clear it on the way out of reclaim (to throttle local reclaimers). Before the introduction of memory.reclaim, the root memcg was the only target of kswapd reclaim, and non-root memcgs were the only targets of cgroup reclaim, so they would never interfere. Using the same bit for both was fine. After memory.reclaim, it is possible for cgroup reclaim on the root cgroup to clear the bit set by kswapd. This would result in reclaim on the node to be unthrottled before the node is balanced. 
Fix this by introducing separate bits for cgroup-level and node-level congestion. kswapd can unthrottle an lruvec that is marked as congested by cgroup reclaim (as the entire node should no longer be congested), but not vice versa (to prevent premature unthrottling before the entire node is balanced). [1]https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Link: https://lkml.kernel.org/r/20230621023101.432780-1-yosryahmed@google.com Signed-off-by: Yosry Ahmed Reported-by: Johannes Weiner Closes: https://lore.kernel.org/lkml/20230405200150.GA35884@cmpxchg.org/ Cc: Michal Hocko Cc: Roman Gushchin Cc: Shakeel Butt Cc: Muchun Song Cc: Yu Zhao Signed-off-by: Andrew Morton --- include/linux/mmzone.h | 18 +++++++++++++++--- mm/vmscan.c | 19 ++++++++++++------- 2 files changed, 27 insertions(+), 10 deletions(-) (limited to 'mm') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 3e822335f214..d863698a84e0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -293,9 +293,21 @@ static inline bool is_active_lru(enum lru_list lru) #define ANON_AND_FILE 2 enum lruvec_flags { - LRUVEC_CONGESTED, /* lruvec has many dirty pages - * backed by a congested BDI - */ + /* + * An lruvec has many dirty pages backed by a congested BDI: + * 1. LRUVEC_CGROUP_CONGESTED is set by cgroup-level reclaim. + * It can be cleared by cgroup reclaim or kswapd. + * 2. LRUVEC_NODE_CONGESTED is set by kswapd node-level reclaim. + * It can only be cleared by kswapd. + * + * Essentially, kswapd can unthrottle an lruvec throttled by cgroup + * reclaim, but not vice versa. This only applies to the root cgroup. + * The goal is to prevent cgroup reclaim on the root cgroup (e.g. + * memory.reclaim) to unthrottle an unbalanced node (that was throttled + * by kswapd). + */ + LRUVEC_CGROUP_CONGESTED, + LRUVEC_NODE_CONGESTED, }; #endif /* !__GENERATING_BOUNDS_H */ diff --git a/mm/vmscan.c b/mm/vmscan.c index b7068be8a034..1080209a568b 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -6578,10 +6578,13 @@ again: * Legacy memcg will stall in page writeback so avoid forcibly * stalling in reclaim_throttle(). 
*/ - if ((current_is_kswapd() || - (cgroup_reclaim(sc) && writeback_throttling_sane(sc))) && - sc->nr.dirty && sc->nr.dirty == sc->nr.congested) - set_bit(LRUVEC_CONGESTED, &target_lruvec->flags); + if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { + if (cgroup_reclaim(sc) && writeback_throttling_sane(sc)) + set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); + + if (current_is_kswapd()) + set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); + } /* * Stall direct reclaim for IO completions if the lruvec is @@ -6591,7 +6594,8 @@ again: */ if (!current_is_kswapd() && current_may_throttle() && !sc->hibernation_mode && - test_bit(LRUVEC_CONGESTED, &target_lruvec->flags)) + (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || + test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) reclaim_throttle(pgdat, VMSCAN_THROTTLE_CONGESTED); if (should_continue_reclaim(pgdat, nr_node_reclaimed, sc)) @@ -6848,7 +6852,7 @@ retry: lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, zone->zone_pgdat); - clear_bit(LRUVEC_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); } } @@ -7237,7 +7241,8 @@ static void clear_pgdat_congested(pg_data_t *pgdat) { struct lruvec *lruvec = mem_cgroup_lruvec(NULL, pgdat); - clear_bit(LRUVEC_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); + clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); clear_bit(PGDAT_DIRTY, &pgdat->flags); clear_bit(PGDAT_WRITEBACK, &pgdat->flags); } -- cgit v1.2.3 From 16f8eb3eea9eb2a1568279d64ca4dc977e7aa538 Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 21 Jun 2023 14:24:02 -0700 Subject: Revert "page cache: fix page_cache_next/prev_miss off by one" This reverts commit 9425c591e06a9ab27a145ba655fb50532cf0bcc9 The reverted commit fixed up routines primarily used by readahead code such that they could also be used by hugetlb. Unfortunately, this caused a performance regression as pointed out by the Closes: tag. The hugetlb code which uses page_cache_next_miss will be addressed in a subsequent patch. Link: https://lkml.kernel.org/r/20230621212403.174710-1-mike.kravetz@oracle.com Fixes: 9425c591e06a ("page cache: fix page_cache_next/prev_miss off by one") Signed-off-by: Mike Kravetz Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202306211346.1e9ff03e-oliver.sang@intel.com Reviewed-by: Sidhartha Kumar Cc: Ackerley Tng Cc: Erdem Aktas Cc: Greg Kroah-Hartman Cc: Matthew Wilcox Cc: Muchun Song Cc: Vishal Annapurve Signed-off-by: Andrew Morton --- mm/filemap.c | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) (limited to 'mm') diff --git a/mm/filemap.c b/mm/filemap.c index c20e0b1997e8..758bbdf300e7 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -1728,9 +1728,7 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm, * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'return - index >= max_scan' will be true). - * In the rare case of index wrap-around, 0 will be returned. 0 will also - * be returned if index == 0 and there is a gap at the index. We can not - * wrap-around if passed index == 0. + * In the rare case of index wrap-around, 0 will be returned. 
*/ pgoff_t page_cache_next_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1740,13 +1738,12 @@ pgoff_t page_cache_next_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_next(&xas); if (!entry || xa_is_value(entry)) - return xas.xa_index; - if (xas.xa_index == 0 && index != 0) - return xas.xa_index; + break; + if (xas.xa_index == 0) + break; } - /* No gaps in range and no wrap-around, return index beyond range */ - return xas.xa_index + 1; + return xas.xa_index; } EXPORT_SYMBOL(page_cache_next_miss); @@ -1767,9 +1764,7 @@ EXPORT_SYMBOL(page_cache_next_miss); * * Return: The index of the gap if found, otherwise an index outside the * range specified (in which case 'index - return >= max_scan' will be true). - * In the rare case of wrap-around, ULONG_MAX will be returned. ULONG_MAX - * will also be returned if index == ULONG_MAX and there is a gap at the - * index. We can not wrap-around if passed index == ULONG_MAX. + * In the rare case of wrap-around, ULONG_MAX will be returned. */ pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t index, unsigned long max_scan) @@ -1779,13 +1774,12 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, while (max_scan--) { void *entry = xas_prev(&xas); if (!entry || xa_is_value(entry)) - return xas.xa_index; - if (xas.xa_index == ULONG_MAX && index != ULONG_MAX) - return xas.xa_index; + break; + if (xas.xa_index == ULONG_MAX) + break; } - /* No gaps in range and no wrap-around, return index beyond range */ - return xas.xa_index - 1; + return xas.xa_index; } EXPORT_SYMBOL(page_cache_prev_miss); -- cgit v1.2.3 From fd4aed8d985a3236d0877ff6d0c80ad39d4ce81a Mon Sep 17 00:00:00 2001 From: Mike Kravetz Date: Wed, 21 Jun 2023 14:24:03 -0700 Subject: hugetlb: revert use of page_cache_next_miss() Ackerley Tng reported an issue with hugetlbfs fallocate as noted in the Closes tag. The issue showed up after the conversion of hugetlb page cache lookup code to use page_cache_next_miss(). User visible effects are: - hugetlbfs fallocate incorrectly returns -EEXIST if pages are present in the file. - hugetlb pages will not be included in core dumps if they need to be brought in via GUP. - userfaultfd UFFDIO_COPY will not notice pages already present in the cache. It may try to allocate a new page and potentially return ENOMEM as opposed to EEXIST. Revert the use of page_cache_next_miss() in hugetlb code. IMPORTANT NOTE FOR STABLE BACKPORTS: This patch will apply cleanly to v6.3. However, due to the change of filemap_get_folio() return values, it will not function correctly. This patch must be modified for stable backports.
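As an aside, a toy model of the failure mode, using a plain array instead of an XArray and invented names (it ignores the index == 0 wrap handling): with the restored page_cache_next_miss() semantics, a one-slot probe such as 'page_cache_next_miss(mapping, index, 1) != index' evaluates the same way whether or not a page is cached at index, so it cannot serve as a presence check, which is why the hunks below switch to filemap_get_folio().

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

/* toy mapping: a non-zero slot means a page is cached at that index */
static int slots[NR_SLOTS] = { [2] = 1 };

/*
 * Model of the restored semantics: walk at most max_scan slots starting at
 * 'index' and stop at the first hole; return the index where the walk stopped.
 */
static unsigned long toy_next_miss(unsigned long index, unsigned long max_scan)
{
	unsigned long i = index - 1;

	while (max_scan--) {
		i++;
		if (i >= NR_SLOTS || !slots[i])
			break;
	}
	return i;
}

int main(void)
{
	/* a one-slot probe returns 'index' in both cases... */
	printf("index 2 (page present): toy_next_miss = %lu\n",
	       toy_next_miss(2, 1));
	printf("index 3 (hole):         toy_next_miss = %lu\n",
	       toy_next_miss(3, 1));

	/* ...so the old hugetlb presence test always reports "absent" */
	bool present = toy_next_miss(2, 1) != 2;
	printf("probe says index 2 is %s\n", present ? "present" : "absent");
	return 0;
}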
[dan.carpenter@linaro.org: fix hugetlbfs_pagecache_present()] Link: https://lkml.kernel.org/r/efa86091-6a2c-4064-8f55-9b44e1313015@moroto.mountain Link: https://lkml.kernel.org/r/20230621212403.174710-2-mike.kravetz@oracle.com Fixes: d0ce0e47b323 ("mm/hugetlb: convert hugetlb fault paths to use alloc_hugetlb_folio()") Signed-off-by: Mike Kravetz Signed-off-by: Dan Carpenter Reported-by: Ackerley Tng Closes: https://lore.kernel.org/linux-mm/cover.1683069252.git.ackerleytng@google.com Reviewed-by: Sidhartha Kumar Cc: Erdem Aktas Cc: Greg Kroah-Hartman Cc: Matthew Wilcox Cc: Muchun Song Cc: Vishal Annapurve Signed-off-by: Andrew Morton --- fs/hugetlbfs/inode.c | 8 +++----- mm/hugetlb.c | 12 ++++++------ 2 files changed, 9 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index 90361a922cec..7b17ccfa039d 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c @@ -821,7 +821,6 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, */ struct folio *folio; unsigned long addr; - bool present; cond_resched(); @@ -842,10 +841,9 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset, mutex_lock(&hugetlb_fault_mutex_table[hash]); /* See if already present in mapping to avoid alloc/free */ - rcu_read_lock(); - present = page_cache_next_miss(mapping, index, 1) != index; - rcu_read_unlock(); - if (present) { + folio = filemap_get_folio(mapping, index); + if (!IS_ERR(folio)) { + folio_put(folio); mutex_unlock(&hugetlb_fault_mutex_table[hash]); continue; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index d76574425da3..bce28cca73a1 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -5728,13 +5728,13 @@ static bool hugetlbfs_pagecache_present(struct hstate *h, { struct address_space *mapping = vma->vm_file->f_mapping; pgoff_t idx = vma_hugecache_offset(h, vma, address); - bool present; - - rcu_read_lock(); - present = page_cache_next_miss(mapping, idx, 1) != idx; - rcu_read_unlock(); + struct folio *folio; - return present; + folio = filemap_get_folio(mapping, idx); + if (IS_ERR(folio)) + return false; + folio_put(folio); + return true; } int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, -- cgit v1.2.3
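As a companion sketch to the hunks above (invented names, not the kernel API): the lookup-based check either fails or returns a referenced object, so presence is unambiguous, and the reference is dropped as soon as the answer is known.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

struct folio {
	int refcount;
};

static struct folio cached = { .refcount = 1 };
/* toy mapping: only index 2 holds a folio */
static struct folio *slots[NR_SLOTS] = { [2] = &cached };

/* stand-in for filemap_get_folio(): an error pointer or a referenced folio */
static struct folio *toy_get_folio(unsigned long index)
{
	if (index >= NR_SLOTS || !slots[index])
		return (struct folio *)(unsigned long)-ENOENT; /* models ERR_PTR() */
	slots[index]->refcount++;
	return slots[index];
}

/* stand-in for IS_ERR() */
static bool toy_is_err(const struct folio *folio)
{
	return (unsigned long)folio >= (unsigned long)-4095;
}

/* the pattern from the revert: look up, drop the reference, report */
static bool toy_pagecache_present(unsigned long index)
{
	struct folio *folio = toy_get_folio(index);

	if (toy_is_err(folio))
		return false;
	folio->refcount--;	/* stand-in for folio_put() */
	return true;
}

int main(void)
{
	printf("index 2: %s\n", toy_pagecache_present(2) ? "present" : "absent");
	printf("index 3: %s\n", toy_pagecache_present(3) ? "present" : "absent");
	return 0;
}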