Diffstat (limited to 'mm')
-rw-r--r--  mm/damon/dbgfs-test.h | 16
-rw-r--r--  mm/debug.c            |  4
-rw-r--r--  mm/huge_memory.c      |  6
-rw-r--r--  mm/memblock.c         |  5
-rw-r--r--  mm/memcontrol.c       | 10
-rw-r--r--  mm/memory-failure.c   | 12
-rw-r--r--  mm/memory.c           |  1
-rw-r--r--  mm/mempolicy.c        | 16
-rw-r--r--  mm/migrate.c          | 62
-rw-r--r--  mm/page_ext.c         |  4
-rw-r--r--  mm/secretmem.c        | 11
-rw-r--r--  mm/shmem.c            |  4
-rw-r--r--  mm/slab.c             |  4
-rw-r--r--  mm/slub.c             | 31
-rw-r--r--  mm/swap.c             | 19
-rw-r--r--  mm/util.c             |  4
-rw-r--r--  mm/workingset.c       |  1
17 files changed, 122 insertions, 88 deletions
diff --git a/mm/damon/dbgfs-test.h b/mm/damon/dbgfs-test.h
index 930e83bceef0..4eddcfa73996 100644
--- a/mm/damon/dbgfs-test.h
+++ b/mm/damon/dbgfs-test.h
@@ -20,27 +20,27 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	ssize_t nr_integers = 0, i;
 
 	question = "123";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
 	KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
 	kfree(answers);
 
 	question = "123abc";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)1, nr_integers);
 	KUNIT_EXPECT_EQ(test, 123ul, answers[0]);
 	kfree(answers);
 
 	question = "a123";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
 
 	question = "12 35";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
 	for (i = 0; i < nr_integers; i++)
@@ -48,7 +48,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "12 35 46";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)3, nr_integers);
 	for (i = 0; i < nr_integers; i++)
@@ -56,7 +56,7 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "12 35 abc 46";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)2, nr_integers);
 	for (i = 0; i < 2; i++)
@@ -64,13 +64,13 @@ static void damon_dbgfs_test_str_to_target_ids(struct kunit *test)
 	kfree(answers);
 
 	question = "";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
 
 	question = "\n";
-	answers = str_to_target_ids(question, strnlen(question, 128),
+	answers = str_to_target_ids(question, strlen(question),
 			&nr_integers);
 	KUNIT_EXPECT_EQ(test, (ssize_t)0, nr_integers);
 	kfree(answers);
diff --git a/mm/debug.c b/mm/debug.c
index e73fe0a8ec3d..fae0f81ad831 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -24,7 +24,9 @@ const char *migrate_reason_names[MR_TYPES] = {
 	"syscall_or_cpuset",
 	"mempolicy_mbind",
 	"numa_misplaced",
-	"cma",
+	"contig_range",
+	"longterm_pin",
+	"demotion",
 };
 
 const struct trace_print_flags pageflag_names[] = {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5e9ef0fc261e..92192cb086c7 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2700,12 +2700,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
 		if (mapping) {
 			int nr = thp_nr_pages(head);
 
-			if (PageSwapBacked(head))
+			if (PageSwapBacked(head)) {
 				__mod_lruvec_page_state(head, NR_SHMEM_THPS,
 							-nr);
-			else
+			} else {
 				__mod_lruvec_page_state(head, NR_FILE_THPS,
 							-nr);
+				filemap_nr_thps_dec(mapping);
+			}
 		}
 
 		__split_huge_page(page, list, end);
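
The mm/debug.c hunk above only helps if migrate_reason_names[] stays in step with enum migrate_reason, which had gained entries (contig_range, longterm_pin, demotion) without the string table following. A self-contained sketch of that keep-in-sync pattern, using a trimmed, hypothetical copy of the enum (C11 static_assert; not from this commit):

#include <assert.h>

enum migrate_reason_demo {
	MR_COMPACTION,
	MR_CONTIG_RANGE,
	MR_LONGTERM_PIN,
	MR_DEMOTION,
	MR_TYPES_DEMO		/* must stay last: doubles as the count */
};

static const char *const migrate_reason_names_demo[] = {
	"compaction",
	"contig_range",
	"longterm_pin",
	"demotion",
};

/* Fails the build as soon as the enum and the table drift apart. */
static_assert(sizeof(migrate_reason_names_demo) /
	      sizeof(migrate_reason_names_demo[0]) == MR_TYPES_DEMO,
	      "name table out of sync with enum migrate_reason_demo");

With such a check in place, adding an enum entry without a matching string becomes a compile error instead of an out-of-bounds read when the name is printed.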
diff --git a/mm/memblock.c b/mm/memblock.c
index 184dcd2e5d99..5096500b2647 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -932,6 +932,9 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  * covered by the memory map. The struct page representing NOMAP memory
  * frames in the memory map will be PageReserved()
  *
+ * Note: if the memory being marked %MEMBLOCK_NOMAP was allocated from
+ * memblock, the caller must inform kmemleak to ignore that memory
+ *
  * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
@@ -1687,7 +1690,7 @@ void __init memblock_cap_memory_range(phys_addr_t base, phys_addr_t size)
 	if (!size)
 		return;
 
-	if (memblock.memory.cnt <= 1) {
+	if (!memblock_memory->total_size) {
 		pr_warn("%s: No memory registered yet\n", __func__);
 		return;
 	}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index b762215d73eb..6da5020a8656 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -106,9 +106,6 @@ static bool do_memsw_account(void)
 /* memcg and lruvec stats flushing */
 static void flush_memcg_stats_dwork(struct work_struct *w);
 static DECLARE_DEFERRABLE_WORK(stats_flush_dwork, flush_memcg_stats_dwork);
-static void flush_memcg_stats_work(struct work_struct *w);
-static DECLARE_WORK(stats_flush_work, flush_memcg_stats_work);
-static DEFINE_PER_CPU(unsigned int, stats_flush_threshold);
 static DEFINE_SPINLOCK(stats_flush_lock);
 
 #define THRESHOLDS_EVENTS_TARGET 128
@@ -682,8 +679,6 @@ void __mod_memcg_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
 
 	/* Update lruvec */
 	__this_cpu_add(pn->lruvec_stats_percpu->state[idx], val);
-	if (!(__this_cpu_inc_return(stats_flush_threshold) % MEMCG_CHARGE_BATCH))
-		queue_work(system_unbound_wq, &stats_flush_work);
 }
 
 /**
@@ -5361,11 +5356,6 @@ static void flush_memcg_stats_dwork(struct work_struct *w)
 	queue_delayed_work(system_unbound_wq, &stats_flush_dwork, 2UL*HZ);
 }
 
-static void flush_memcg_stats_work(struct work_struct *w)
-{
-	mem_cgroup_flush_stats();
-}
-
 static void mem_cgroup_css_rstat_flush(struct cgroup_subsys_state *css, int cpu)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 54879c339024..3e6449f2102a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -306,6 +306,7 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 					       struct vm_area_struct *vma)
 {
 	unsigned long address = vma_address(page, vma);
+	unsigned long ret = 0;
 	pgd_t *pgd;
 	p4d_t *p4d;
 	pud_t *pud;
@@ -329,11 +330,10 @@ static unsigned long dev_pagemap_mapping_shift(struct page *page,
 	if (pmd_devmap(*pmd))
 		return PMD_SHIFT;
 	pte = pte_offset_map(pmd, address);
-	if (!pte_present(*pte))
-		return 0;
-	if (pte_devmap(*pte))
-		return PAGE_SHIFT;
-	return 0;
+	if (pte_present(*pte) && pte_devmap(*pte))
+		ret = PAGE_SHIFT;
+	pte_unmap(pte);
+	return ret;
 }
 
 /*
@@ -1126,7 +1126,7 @@ static int page_action(struct page_state *ps, struct page *p,
  */
 static inline bool HWPoisonHandlable(struct page *page)
 {
-	return PageLRU(page) || __PageMovable(page);
+	return PageLRU(page) || __PageMovable(page) || is_free_buddy_page(page);
 }
 
 static int __get_hwpoison_page(struct page *page)
diff --git a/mm/memory.c b/mm/memory.c
index 25fc46e87214..adf9b9ef8277 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3403,6 +3403,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
 	i_mmap_unlock_write(mapping);
 }
+EXPORT_SYMBOL_GPL(unmap_mapping_pages);
 
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
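
The new kernel-doc note in memblock_mark_nomap() places a duty on callers: memblock-allocated memory that becomes NOMAP must be hidden from kmemleak, or it will trip false leak reports once the region drops out of the kernel's view. A kernel-context sketch of a compliant caller (function name, size and alignment are illustrative assumptions, not from this diff):

#include <linux/memblock.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>

/* Illustrative: reserve a region, mark it NOMAP, and keep kmemleak
 * from scanning/reporting the now-reserved memory. */
static phys_addr_t __init reserve_nomap_region(phys_addr_t size)
{
	phys_addr_t base = memblock_phys_alloc(size, SZ_2M);

	if (!base)
		return 0;

	memblock_mark_nomap(base, size);
	/* Per the comment added above: inform kmemleak explicitly. */
	kmemleak_ignore_phys(base);
	return base;
}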
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1592b081c58e..d12e0608fced 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -856,16 +856,6 @@ static long do_set_mempolicy(unsigned short mode, unsigned short flags,
 		goto out;
 	}
 
-	if (flags & MPOL_F_NUMA_BALANCING) {
-		if (new && new->mode == MPOL_BIND) {
-			new->flags |= (MPOL_F_MOF | MPOL_F_MORON);
-		} else {
-			ret = -EINVAL;
-			mpol_put(new);
-			goto out;
-		}
-	}
-
 	ret = mpol_set_nodemask(new, nodes, scratch);
 	if (ret) {
 		mpol_put(new);
@@ -1458,7 +1448,11 @@ static inline int sanitize_mpol_flags(int *mode, unsigned short *flags)
 		return -EINVAL;
 	if ((*flags & MPOL_F_STATIC_NODES) && (*flags & MPOL_F_RELATIVE_NODES))
 		return -EINVAL;
-
+	if (*flags & MPOL_F_NUMA_BALANCING) {
+		if (*mode != MPOL_BIND)
+			return -EINVAL;
+		*flags |= (MPOL_F_MOF | MPOL_F_MORON);
+	}
 	return 0;
 }
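
Moving the MPOL_F_NUMA_BALANCING check from do_set_mempolicy() into sanitize_mpol_flags() means every entry path that parses mode flags now enforces the MPOL_BIND-only rule. A minimal userspace sketch of the syscall contract (assumes a kernel with this behavior; the MPOL_* values are copied from the uapi header in case libc's headers predate the flag):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

#define MPOL_BIND		2	/* from uapi/linux/mempolicy.h */
#ifndef MPOL_F_NUMA_BALANCING
#define MPOL_F_NUMA_BALANCING	(1 << 13)
#endif

int main(void)
{
	unsigned long nodemask = 1UL;	/* node 0 only */

	/* Accepted: MPOL_BIND combined with MPOL_F_NUMA_BALANCING. */
	if (syscall(SYS_set_mempolicy, MPOL_BIND | MPOL_F_NUMA_BALANCING,
		    &nodemask, sizeof(nodemask) * 8) != 0)
		perror("set_mempolicy(MPOL_BIND | MPOL_F_NUMA_BALANCING)");

	/* Rejected with EINVAL: the flag with any non-BIND mode. */
	if (syscall(SYS_set_mempolicy, 0 /* MPOL_DEFAULT */ | MPOL_F_NUMA_BALANCING,
		    NULL, 0) != 0 && errno == EINVAL)
		printf("non-BIND mode rejected: %s\n", strerror(errno));
	return 0;
}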
diff --git a/mm/migrate.c b/mm/migrate.c
index a6a7743ee98f..1852d787e6ab 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -3066,7 +3066,7 @@ void migrate_vma_finalize(struct migrate_vma *migrate)
 EXPORT_SYMBOL(migrate_vma_finalize);
 #endif /* CONFIG_DEVICE_PRIVATE */
 
-#if defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_HOTPLUG_CPU)
 /* Disable reclaim-based migration. */
 static void __disable_all_migrate_targets(void)
 {
@@ -3209,25 +3209,6 @@ static void set_migration_target_nodes(void)
 }
 
 /*
- * React to hotplug events that might affect the migration targets
- * like events that online or offline NUMA nodes.
- *
- * The ordering is also currently dependent on which nodes have
- * CPUs. That means we need CPU on/offline notification too.
- */
-static int migration_online_cpu(unsigned int cpu)
-{
-	set_migration_target_nodes();
-	return 0;
-}
-
-static int migration_offline_cpu(unsigned int cpu)
-{
-	set_migration_target_nodes();
-	return 0;
-}
-
-/*
  * This leaves migrate-on-reclaim transiently disabled between
  * the MEM_GOING_OFFLINE and MEM_OFFLINE events.  This runs
  * whether reclaim-based migration is enabled or not, which
@@ -3239,8 +3220,18 @@ static int migration_offline_cpu(unsigned int cpu)
  * set_migration_target_nodes().
 */
 static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
-						 unsigned long action, void *arg)
+						 unsigned long action, void *_arg)
 {
+	struct memory_notify *arg = _arg;
+
+	/*
+	 * Only update the node migration order when a node is
+	 * changing status, like online->offline. This avoids
+	 * the overhead of synchronize_rcu() in most cases.
+	 */
+	if (arg->status_change_nid < 0)
+		return notifier_from_errno(0);
+
 	switch (action) {
 	case MEM_GOING_OFFLINE:
 		/*
@@ -3274,13 +3265,31 @@ static int __meminit migrate_on_reclaim_callback(struct notifier_block *self,
 	return notifier_from_errno(0);
 }
 
+/*
+ * React to hotplug events that might affect the migration targets
+ * like events that online or offline NUMA nodes.
+ *
+ * The ordering is also currently dependent on which nodes have
+ * CPUs. That means we need CPU on/offline notification too.
+ */
+static int migration_online_cpu(unsigned int cpu)
+{
+	set_migration_target_nodes();
+	return 0;
+}
+
+static int migration_offline_cpu(unsigned int cpu)
+{
+	set_migration_target_nodes();
+	return 0;
+}
+
 static int __init migrate_on_reclaim_init(void)
 {
 	int ret;
 
-	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "migrate on reclaim",
-				migration_online_cpu,
-				migration_offline_cpu);
+	ret = cpuhp_setup_state_nocalls(CPUHP_MM_DEMOTION_DEAD, "mm/demotion:offline",
+					NULL, migration_offline_cpu);
 	/*
 	 * In the unlikely case that this fails, the automatic
 	 * migration targets may become suboptimal for nodes
@@ -3288,9 +3297,12 @@ static int __init migrate_on_reclaim_init(void)
 	 * rare case, do not bother trying to do anything special.
 	 */
 	WARN_ON(ret < 0);
+	ret = cpuhp_setup_state(CPUHP_AP_MM_DEMOTION_ONLINE, "mm/demotion:online",
+				migration_online_cpu, NULL);
+	WARN_ON(ret < 0);
 
 	hotplug_memory_notifier(migrate_on_reclaim_callback, 100);
 	return 0;
 }
 late_initcall(migrate_on_reclaim_init);
-#endif /* CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/mm/page_ext.c b/mm/page_ext.c
index dfb91653d359..2a52fd9ed464 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -269,7 +269,7 @@ static int __meminit init_section_page_ext(unsigned long pfn, int nid)
 	total_usage += table_size;
 	return 0;
 }
-#ifdef CONFIG_MEMORY_HOTPLUG
+
 static void free_page_ext(void *addr)
 {
 	if (is_vmalloc_addr(addr)) {
@@ -374,8 +374,6 @@ static int __meminit page_ext_callback(struct notifier_block *self,
 	return notifier_from_errno(ret);
 }
 
-#endif
-
 void __init page_ext_init(void)
 {
 	unsigned long pfn;
diff --git a/mm/secretmem.c b/mm/secretmem.c
index 1fea68b8d5a6..c2dda408bb36 100644
--- a/mm/secretmem.c
+++ b/mm/secretmem.c
@@ -18,7 +18,6 @@
 #include <linux/secretmem.h>
 #include <linux/set_memory.h>
 #include <linux/sched/signal.h>
-#include <linux/refcount.h>
 
 #include <uapi/linux/magic.h>
 
@@ -41,11 +40,11 @@ module_param_named(enable, secretmem_enable, bool, 0400);
 MODULE_PARM_DESC(secretmem_enable,
 		 "Enable secretmem and memfd_secret(2) system call");
 
-static refcount_t secretmem_users;
+static atomic_t secretmem_users;
 
 bool secretmem_active(void)
 {
-	return !!refcount_read(&secretmem_users);
+	return !!atomic_read(&secretmem_users);
 }
 
 static vm_fault_t secretmem_fault(struct vm_fault *vmf)
@@ -104,7 +103,7 @@ static const struct vm_operations_struct secretmem_vm_ops = {
 
 static int secretmem_release(struct inode *inode, struct file *file)
 {
-	refcount_dec(&secretmem_users);
+	atomic_dec(&secretmem_users);
 	return 0;
 }
 
@@ -204,6 +203,8 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
 
 	if (flags & ~(SECRETMEM_FLAGS_MASK | O_CLOEXEC))
 		return -EINVAL;
+	if (atomic_read(&secretmem_users) < 0)
+		return -ENFILE;
 
 	fd = get_unused_fd_flags(flags & O_CLOEXEC);
 	if (fd < 0)
@@ -218,7 +219,7 @@ SYSCALL_DEFINE1(memfd_secret, unsigned int, flags)
 	file->f_flags |= O_LARGEFILE;
 
 	fd_install(fd, file);
-	refcount_inc(&secretmem_users);
+	atomic_inc(&secretmem_users);
 	return fd;
 
 err_put_fd:
diff --git a/mm/shmem.c b/mm/shmem.c
index 88742953532c..b5860f4a2738 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -490,9 +490,9 @@ bool shmem_is_huge(struct vm_area_struct *vma,
 	case SHMEM_HUGE_ALWAYS:
 		return true;
 	case SHMEM_HUGE_WITHIN_SIZE:
-		index = round_up(index, HPAGE_PMD_NR);
+		index = round_up(index + 1, HPAGE_PMD_NR);
 		i_size = round_up(i_size_read(inode), PAGE_SIZE);
-		if (i_size >= HPAGE_PMD_SIZE && (i_size >> PAGE_SHIFT) >= index)
+		if (i_size >> PAGE_SHIFT >= index)
 			return true;
 		fallthrough;
 	case SHMEM_HUGE_ADVISE:
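
The mm/shmem.c hunk is an off-by-one fix: the page at `index` is only covered by a huge page when i_size reaches the end of the PMD-sized block containing it, which is round_up(index + 1, HPAGE_PMD_NR), not round_up(index, HPAGE_PMD_NR) — for an index already on a block boundary the latter is the boundary itself. A small demonstration of the boundary case (userspace; HPAGE_PMD_NR of 512 assumes x86-64 with 4 KiB base pages):

#include <stdio.h>

#define HPAGE_PMD_NR	512UL	/* assumption: 2M huge page / 4K base page */
/* Same as the kernel's round_up(); valid for power-of-two y. */
#define round_up(x, y)	((((x) - 1) | ((y) - 1)) + 1)

int main(void)
{
	unsigned long index = 512;	/* first page of the second PMD block */

	/* Old bound: round_up(512, 512) == 512, so an i_size of only 512
	 * pages looked big enough even though the block holding this
	 * index spans pages 512..1023. */
	printf("old bound: %lu pages\n", round_up(index, HPAGE_PMD_NR));

	/* New bound: round_up(513, 512) == 1024, the true block end. */
	printf("new bound: %lu pages\n", round_up(index + 1, HPAGE_PMD_NR));
	return 0;
}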
diff --git a/mm/slab.c b/mm/slab.c
index d0f725637663..874b3f8fe80d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1095,7 +1095,7 @@ static int slab_offline_cpu(unsigned int cpu)
 	return 0;
 }
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+#if defined(CONFIG_NUMA)
 /*
  * Drains freelist for a node on each slab cache, used for memory hot-remove.
  * Returns -EBUSY if all objects cannot be drained so that the node is not
@@ -1157,7 +1157,7 @@ static int __meminit slab_memory_callback(struct notifier_block *self,
 out:
 	return notifier_from_errno(ret);
 }
-#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
+#endif /* CONFIG_NUMA */
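
The mm/slab.c hunk can drop CONFIG_MEMORY_HOTPLUG from the guard because registration degrades gracefully: hotplug_memory_notifier() should compile to a no-op stub when memory hotplug is disabled (an assumption worth verifying against include/linux/memory.h), so the drain code only needs CONFIG_NUMA. A kernel-context sketch of the notifier shape slab_memory_callback() follows (demo_* names and the priority value are hypothetical):

#include <linux/memory.h>
#include <linux/notifier.h>

static int __meminit demo_memory_callback(struct notifier_block *self,
					  unsigned long action, void *arg)
{
	int ret = 0;

	switch (action) {
	case MEM_GOING_ONLINE:
		/* e.g. allocate per-node structures */
		break;
	case MEM_GOING_OFFLINE:
		/* e.g. drain per-node freelists; ret = -EBUSY on failure */
		break;
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
		break;
	}
	return notifier_from_errno(ret);
}

static int __init demo_init(void)
{
	/* A stub (assumed) when CONFIG_MEMORY_HOTPLUG is off. */
	return hotplug_memory_notifier(demo_memory_callback, 100);
}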
diff --git a/mm/slub.c b/mm/slub.c
index 3d2025f7163b..d8f77346376d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1701,7 +1701,8 @@ static __always_inline bool slab_free_hook(struct kmem_cache *s,
 }
 
 static inline bool slab_free_freelist_hook(struct kmem_cache *s,
-					   void **head, void **tail)
+					   void **head, void **tail,
+					   int *cnt)
 {
 
 	void *object;
@@ -1728,6 +1729,12 @@ static inline bool slab_free_freelist_hook(struct kmem_cache *s,
 			*head = object;
 			if (!*tail)
 				*tail = object;
+		} else {
+			/*
+			 * Adjust the reconstructed freelist depth
+			 * accordingly if object's reuse is delayed.
+			 */
+			--(*cnt);
 		}
 	} while (object != old_tail);
 
@@ -3413,7 +3420,9 @@ static __always_inline void do_slab_free(struct kmem_cache *s,
 	struct kmem_cache_cpu *c;
 	unsigned long tid;
 
-	memcg_slab_free_hook(s, &head, 1);
+	/* memcg_slab_free_hook() is already called for bulk free. */
+	if (!tail)
+		memcg_slab_free_hook(s, &head, 1);
 redo:
 	/*
 	 * Determine the currently cpus per cpu slab.
@@ -3480,7 +3489,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
 	 * With KASAN enabled slab_free_freelist_hook modifies the freelist
 	 * to remove objects, whose reuse must be delayed.
 	 */
-	if (slab_free_freelist_hook(s, &head, &tail))
+	if (slab_free_freelist_hook(s, &head, &tail, &cnt))
 		do_slab_free(s, page, head, tail, cnt, addr);
 }
 
@@ -4203,8 +4212,8 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
 	if (alloc_kmem_cache_cpus(s))
 		return 0;
 
-	free_kmem_cache_nodes(s);
 error:
+	__kmem_cache_release(s);
 	return -EINVAL;
 }
 
@@ -4880,13 +4889,15 @@ int __kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
 		return 0;
 
 	err = sysfs_slab_add(s);
-	if (err)
+	if (err) {
 		__kmem_cache_release(s);
+		return err;
+	}
 
 	if (s->flags & SLAB_STORE_USER)
 		debugfs_slab_add(s);
 
-	return err;
+	return 0;
 }
 
 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
@@ -6108,9 +6119,14 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 	struct kmem_cache *s = file_inode(filep)->i_private;
 	unsigned long *obj_map;
 
+	if (!t)
+		return -ENOMEM;
+
 	obj_map = bitmap_alloc(oo_objects(s->oo), GFP_KERNEL);
-	if (!obj_map)
+	if (!obj_map) {
+		seq_release_private(inode, filep);
 		return -ENOMEM;
+	}
 
 	if (strcmp(filep->f_path.dentry->d_name.name, "alloc_traces") == 0)
 		alloc = TRACK_ALLOC;
@@ -6119,6 +6135,7 @@ static int slab_debug_trace_open(struct inode *inode, struct file *filep)
 	if (!alloc_loc_track(t, PAGE_SIZE / sizeof(struct location),
 						GFP_KERNEL)) {
 		bitmap_free(obj_map);
+		seq_release_private(inode, filep);
 		return -ENOMEM;
 	}
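
The first mm/slub.c hunk threads `cnt` into slab_free_freelist_hook() because KASAN may quarantine some objects (delay their reuse) while the freelist is rebuilt, so the count handed on to do_slab_free() must shrink with it. The underlying pattern — filter a singly linked list while keeping an external count honest — in a self-contained userspace sketch:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node {
	int val;
	struct node *next;
};

/* Stand-in for the "delay reuse of this object?" decision. */
static bool withhold(const struct node *n)
{
	return n->val % 2 != 0;	/* arbitrary demo predicate */
}

/* Rebuild the list, dropping withheld nodes and keeping *cnt in
 * sync, mirroring what the patched hook does with --(*cnt). */
static struct node *filter(struct node *head, int *cnt)
{
	struct node *out = NULL, **tail = &out;

	while (head) {
		struct node *next = head->next;

		if (!withhold(head)) {
			*tail = head;
			tail = &head->next;
		} else {
			--(*cnt);	/* depth must track reality */
		}
		head = next;
	}
	*tail = NULL;
	return out;
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	int cnt = 3;

	filter(&a, &cnt);
	printf("remaining: %d\n", cnt);	/* 1: only the even node survives */
	return 0;
}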
diff --git a/mm/swap.c b/mm/swap.c
index 897200d27dd0..af3cad4e5378 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -620,7 +620,6 @@ void lru_add_drain_cpu(int cpu)
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
 
 	activate_page_drain(cpu);
-	invalidate_bh_lrus_cpu(cpu);
 }
 
 /**
@@ -703,6 +702,20 @@ void lru_add_drain(void)
 	local_unlock(&lru_pvecs.lock);
 }
 
+/*
+ * It's called from per-cpu workqueue context in SMP case so
+ * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
+ * the same cpu. It shouldn't be a problem in !SMP case since
+ * the core is only one and the locks will disable preemption.
+ */
+static void lru_add_and_bh_lrus_drain(void)
+{
+	local_lock(&lru_pvecs.lock);
+	lru_add_drain_cpu(smp_processor_id());
+	local_unlock(&lru_pvecs.lock);
+	invalidate_bh_lrus_cpu();
+}
+
 void lru_add_drain_cpu_zone(struct zone *zone)
 {
 	local_lock(&lru_pvecs.lock);
@@ -717,7 +730,7 @@ static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);
 
 static void lru_add_drain_per_cpu(struct work_struct *dummy)
 {
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 }
 
 /*
@@ -858,7 +871,7 @@ void lru_cache_disable(void)
 	 */
 	__lru_add_drain_all(true);
 #else
-	lru_add_drain();
+	lru_add_and_bh_lrus_drain();
 #endif
 }
 
diff --git a/mm/util.c b/mm/util.c
index 499b6b5767ed..bacabe446906 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -787,7 +787,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 		size_t *lenp, loff_t *ppos)
 {
 	struct ctl_table t;
-	int new_policy;
+	int new_policy = -1;
 	int ret;
 
 	/*
@@ -805,7 +805,7 @@ int overcommit_policy_handler(struct ctl_table *table, int write, void *buffer,
 		t = *table;
 		t.data = &new_policy;
 		ret = proc_dointvec_minmax(&t, write, buffer, lenp, ppos);
-		if (ret)
+		if (ret || new_policy == -1)
 			return ret;
 
 		mm_compute_batch(new_policy);
diff --git a/mm/workingset.c b/mm/workingset.c
index d4268d8e9a82..d5b81e4f4cbe 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -352,6 +352,7 @@ void workingset_refault(struct page *page, void *shadow)
 
 	inc_lruvec_state(lruvec, WORKINGSET_REFAULT_BASE + file);
 
+	mem_cgroup_flush_stats();
 	/*
 	 * Compare the distance to the existing workingset size. We
 	 * don't activate pages that couldn't stay resident even if
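
The mm/util.c fix closes a subtle hole: proc_dointvec_minmax() can return 0 without storing anything (for a zero-length write), which previously let mm_compute_batch() run on an uninitialized value. The defensive pattern used there — initialize to a sentinel outside the legal range and bail if it never changes — in a self-contained sketch (parse_policy() is a made-up stand-in for the sysctl helper):

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for proc_dointvec_minmax(): may "succeed" while writing
 * nothing, e.g. for an empty input. */
static int parse_policy(const char *buf, int *out)
{
	if (*buf == '\0')
		return 0;	/* success, but *out is left untouched */
	*out = atoi(buf);
	return 0;
}

static int set_policy(const char *buf)
{
	int new_policy = -1;	/* sentinel: not a valid policy (0..2) */
	int ret = parse_policy(buf, &new_policy);

	if (ret || new_policy == -1)	/* the fix: reject the no-write case */
		return ret;

	printf("policy set to %d\n", new_policy);
	return 0;
}

int main(void)
{
	set_policy("");		/* silently ignored, nothing acted upon */
	set_policy("2");	/* policy set to 2 */
	return 0;
}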