Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig.debug      |  1
-rw-r--r-- | mm/damon/core.c       |  2
-rw-r--r-- | mm/filemap.c          | 26
-rw-r--r-- | mm/gup_test.c         |  1
-rw-r--r-- | mm/kfence/kfence.h    |  2
-rw-r--r-- | mm/mmap.c             | 37
-rw-r--r-- | mm/page_table_check.c |  6
-rw-r--r-- | mm/shrinker_debug.c   | 15
-rw-r--r-- | mm/vmscan.c           |  5
-rw-r--r-- | mm/zsmalloc.c         | 36
-rw-r--r-- | mm/zswap.c            | 27
11 files changed, 91 insertions, 67 deletions
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index a925415b4d10..018a5bd2f576 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -98,6 +98,7 @@ config PAGE_OWNER
 config PAGE_TABLE_CHECK
         bool "Check for invalid mappings in user page tables"
         depends on ARCH_SUPPORTS_PAGE_TABLE_CHECK
+        depends on EXCLUSIVE_SYSTEM_RAM
         select PAGE_EXTENSION
         help
           Check that anonymous page is not being mapped twice with read write
diff --git a/mm/damon/core.c b/mm/damon/core.c
index d9ef62047bf5..91cff7f2997e 100644
--- a/mm/damon/core.c
+++ b/mm/damon/core.c
@@ -551,6 +551,8 @@ int damon_set_attrs(struct damon_ctx *ctx, struct damon_attrs *attrs)
                 return -EINVAL;
         if (attrs->min_nr_regions > attrs->max_nr_regions)
                 return -EINVAL;
+        if (attrs->sample_interval > attrs->aggr_interval)
+                return -EINVAL;
 
         damon_update_monitoring_results(ctx, attrs);
         ctx->attrs = *attrs;
diff --git a/mm/filemap.c b/mm/filemap.c
index b4c9bd368b7e..83dda76d1fc3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1760,7 +1760,9 @@ bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
  *
  * Return: The index of the gap if found, otherwise an index outside the
  * range specified (in which case 'return - index >= max_scan' will be true).
- * In the rare case of index wrap-around, 0 will be returned.
+ * In the rare case of index wrap-around, 0 will be returned. 0 will also
+ * be returned if index == 0 and there is a gap at the index. We can not
+ * wrap-around if passed index == 0.
  */
 pgoff_t page_cache_next_miss(struct address_space *mapping,
                              pgoff_t index, unsigned long max_scan)
@@ -1770,12 +1772,13 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
         while (max_scan--) {
                 void *entry = xas_next(&xas);
                 if (!entry || xa_is_value(entry))
-                        break;
-                if (xas.xa_index == 0)
-                        break;
+                        return xas.xa_index;
+                if (xas.xa_index == 0 && index != 0)
+                        return xas.xa_index;
         }
 
-        return xas.xa_index;
+        /* No gaps in range and no wrap-around, return index beyond range */
+        return xas.xa_index + 1;
 }
 EXPORT_SYMBOL(page_cache_next_miss);
 
@@ -1796,7 +1799,9 @@ EXPORT_SYMBOL(page_cache_next_miss);
  *
  * Return: The index of the gap if found, otherwise an index outside the
  * range specified (in which case 'index - return >= max_scan' will be true).
- * In the rare case of wrap-around, ULONG_MAX will be returned.
+ * In the rare case of wrap-around, ULONG_MAX will be returned. ULONG_MAX
+ * will also be returned if index == ULONG_MAX and there is a gap at the
+ * index. We can not wrap-around if passed index == ULONG_MAX.
  */
 pgoff_t page_cache_prev_miss(struct address_space *mapping,
                              pgoff_t index, unsigned long max_scan)
@@ -1806,12 +1811,13 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping,
         while (max_scan--) {
                 void *entry = xas_prev(&xas);
                 if (!entry || xa_is_value(entry))
-                        break;
-                if (xas.xa_index == ULONG_MAX)
-                        break;
+                        return xas.xa_index;
+                if (xas.xa_index == ULONG_MAX && index != ULONG_MAX)
+                        return xas.xa_index;
         }
 
-        return xas.xa_index;
+        /* No gaps in range and no wrap-around, return index beyond range */
+        return xas.xa_index - 1;
 }
 EXPORT_SYMBOL(page_cache_prev_miss);
 
diff --git a/mm/gup_test.c b/mm/gup_test.c
index 8ae7307a1bb6..c0421b786dcd 100644
--- a/mm/gup_test.c
+++ b/mm/gup_test.c
@@ -381,6 +381,7 @@ static int gup_test_release(struct inode *inode, struct file *file)
 static const struct file_operations gup_test_fops = {
         .open = nonseekable_open,
         .unlocked_ioctl = gup_test_ioctl,
+        .compat_ioctl = compat_ptr_ioctl,
         .release = gup_test_release,
 };
 
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index 2aafc46a4aaf..392fb273e7bd 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -29,7 +29,7 @@
  * canary of every 8 bytes is the same. 64-bit memory can be filled and checked
  * at a time instead of byte by byte to improve performance.
  */
-#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(0x0706050403020100))
+#define KFENCE_CANARY_PATTERN_U64 ((u64)0xaaaaaaaaaaaaaaaa ^ (u64)(le64_to_cpu(0x0706050403020100)))
 
 /* Maximum stack depth for reports. */
 #define KFENCE_STACK_DEPTH 64
diff --git a/mm/mmap.c b/mm/mmap.c
index 13678edaa22c..d600404580b2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2318,21 +2318,6 @@ int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
         return __split_vma(vmi, vma, addr, new_below);
 }
 
-static inline int munmap_sidetree(struct vm_area_struct *vma,
-                                  struct ma_state *mas_detach)
-{
-        vma_start_write(vma);
-        mas_set_range(mas_detach, vma->vm_start, vma->vm_end - 1);
-        if (mas_store_gfp(mas_detach, vma, GFP_KERNEL))
-                return -ENOMEM;
-
-        vma_mark_detached(vma, true);
-        if (vma->vm_flags & VM_LOCKED)
-                vma->vm_mm->locked_vm -= vma_pages(vma);
-
-        return 0;
-}
-
 /*
  * do_vmi_align_munmap() - munmap the aligned region from @start to @end.
  * @vmi: The vma iterator
@@ -2354,6 +2339,7 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
         struct maple_tree mt_detach;
         int count = 0;
         int error = -ENOMEM;
+        unsigned long locked_vm = 0;
         MA_STATE(mas_detach, &mt_detach, 0, 0);
         mt_init_flags(&mt_detach, vmi->mas.tree->ma_flags & MT_FLAGS_LOCK_MASK);
         mt_set_external_lock(&mt_detach, &mm->mmap_lock);
@@ -2399,9 +2385,13 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
                         if (error)
                                 goto end_split_failed;
                 }
-                error = munmap_sidetree(next, &mas_detach);
-                if (error)
-                        goto munmap_sidetree_failed;
+                vma_start_write(next);
+                mas_set_range(&mas_detach, next->vm_start, next->vm_end - 1);
+                if (mas_store_gfp(&mas_detach, next, GFP_KERNEL))
+                        goto munmap_gather_failed;
+                vma_mark_detached(next, true);
+                if (next->vm_flags & VM_LOCKED)
+                        locked_vm += vma_pages(next);
 
                 count++;
 #ifdef CONFIG_DEBUG_VM_MAPLE_TREE
@@ -2447,10 +2437,12 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
         }
 #endif
         /* Point of no return */
+        error = -ENOMEM;
         vma_iter_set(vmi, start);
         if (vma_iter_clear_gfp(vmi, start, end, GFP_KERNEL))
-                return -ENOMEM;
+                goto clear_tree_failed;
 
+        mm->locked_vm -= locked_vm;
         mm->map_count -= count;
         /*
          * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ -2480,9 +2472,14 @@ do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
         validate_mm(mm);
         return downgrade ? 1 : 0;
 
+clear_tree_failed:
 userfaultfd_error:
-munmap_sidetree_failed:
+munmap_gather_failed:
 end_split_failed:
+        mas_set(&mas_detach, 0);
+        mas_for_each(&mas_detach, next, end)
+                vma_mark_detached(next, false);
+
         __mt_destroy(&mt_detach);
 start_split_failed:
 map_count_exceeded:
diff --git a/mm/page_table_check.c b/mm/page_table_check.c
index 25d8610c0042..f2baf97d5f38 100644
--- a/mm/page_table_check.c
+++ b/mm/page_table_check.c
@@ -71,6 +71,8 @@ static void page_table_check_clear(struct mm_struct *mm, unsigned long addr,
 
         page = pfn_to_page(pfn);
         page_ext = page_ext_get(page);
+
+        BUG_ON(PageSlab(page));
         anon = PageAnon(page);
 
         for (i = 0; i < pgcnt; i++) {
@@ -107,6 +109,8 @@ static void page_table_check_set(struct mm_struct *mm, unsigned long addr,
 
         page = pfn_to_page(pfn);
         page_ext = page_ext_get(page);
+
+        BUG_ON(PageSlab(page));
         anon = PageAnon(page);
 
         for (i = 0; i < pgcnt; i++) {
@@ -133,6 +137,8 @@ void __page_table_check_zero(struct page *page, unsigned int order)
         struct page_ext *page_ext;
         unsigned long i;
 
+        BUG_ON(PageSlab(page));
+
         page_ext = page_ext_get(page);
         BUG_ON(!page_ext);
         for (i = 0; i < (1ul << order); i++) {
diff --git a/mm/shrinker_debug.c b/mm/shrinker_debug.c
index 3f83b10c5031..fe10436d9911 100644
--- a/mm/shrinker_debug.c
+++ b/mm/shrinker_debug.c
@@ -237,7 +237,8 @@ int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
 }
 EXPORT_SYMBOL(shrinker_debugfs_rename);
 
-struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
+struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
+                                       int *debugfs_id)
 {
         struct dentry *entry = shrinker->debugfs_entry;
 
@@ -246,14 +247,18 @@ struct dentry *shrinker_debugfs_remove(struct shrinker *shrinker)
         kfree_const(shrinker->name);
         shrinker->name = NULL;
 
-        if (entry) {
-                ida_free(&shrinker_debugfs_ida, shrinker->debugfs_id);
-                shrinker->debugfs_entry = NULL;
-        }
+        *debugfs_id = entry ? shrinker->debugfs_id : -1;
+        shrinker->debugfs_entry = NULL;
 
         return entry;
 }
 
+void shrinker_debugfs_remove(struct dentry *debugfs_entry, int debugfs_id)
+{
+        debugfs_remove_recursive(debugfs_entry);
+        ida_free(&shrinker_debugfs_ida, debugfs_id);
+}
+
 static int __init shrinker_debugfs_init(void)
 {
         struct shrinker *shrinker;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d257916f39e5..6d0cd2840cf0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -805,6 +805,7 @@ EXPORT_SYMBOL(register_shrinker);
 void unregister_shrinker(struct shrinker *shrinker)
 {
         struct dentry *debugfs_entry;
+        int debugfs_id;
 
         if (!(shrinker->flags & SHRINKER_REGISTERED))
                 return;
@@ -814,13 +815,13 @@ void unregister_shrinker(struct shrinker *shrinker)
         shrinker->flags &= ~SHRINKER_REGISTERED;
         if (shrinker->flags & SHRINKER_MEMCG_AWARE)
                 unregister_memcg_shrinker(shrinker);
-        debugfs_entry = shrinker_debugfs_remove(shrinker);
+        debugfs_entry = shrinker_debugfs_detach(shrinker, &debugfs_id);
         mutex_unlock(&shrinker_mutex);
 
         atomic_inc(&shrinker_srcu_generation);
         synchronize_srcu(&shrinker_srcu);
 
-        debugfs_remove_recursive(debugfs_entry);
+        shrinker_debugfs_remove(debugfs_entry, debugfs_id);
 
         kfree(shrinker->nr_deferred);
         shrinker->nr_deferred = NULL;
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 44ddaf5d601e..02f7f414aade 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1331,31 +1331,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
         obj_to_location(obj, &page, &obj_idx);
         zspage = get_zspage(page);
 
-#ifdef CONFIG_ZPOOL
-        /*
-         * Move the zspage to front of pool's LRU.
-         *
-         * Note that this is swap-specific, so by definition there are no ongoing
-         * accesses to the memory while the page is swapped out that would make
-         * it "hot". A new entry is hot, then ages to the tail until it gets either
-         * written back or swaps back in.
-         *
-         * Furthermore, map is also called during writeback. We must not put an
-         * isolated page on the LRU mid-reclaim.
-         *
-         * As a result, only update the LRU when the page is mapped for write
-         * when it's first instantiated.
-         *
-         * This is a deviation from the other backends, which perform this update
-         * in the allocation function (zbud_alloc, z3fold_alloc).
-         */
-        if (mm == ZS_MM_WO) {
-                if (!list_empty(&zspage->lru))
-                        list_del(&zspage->lru);
-                list_add(&zspage->lru, &pool->lru);
-        }
-#endif
-
         /*
          * migration cannot move any zpages in this zspage. Here, pool->lock
          * is too heavy since callers would take some time until they calls
@@ -1525,9 +1500,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
                 fix_fullness_group(class, zspage);
                 record_obj(handle, obj);
                 class_stat_inc(class, ZS_OBJS_INUSE, 1);
-                spin_unlock(&pool->lock);
 
-                return handle;
+                goto out;
         }
 
         spin_unlock(&pool->lock);
@@ -1550,6 +1524,14 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
         /* We completely set up zspage so mark them as movable */
         SetZsPageMovable(pool, zspage);
+out:
+#ifdef CONFIG_ZPOOL
+        /* Add/move zspage to beginning of LRU */
+        if (!list_empty(&zspage->lru))
+                list_del(&zspage->lru);
+        list_add(&zspage->lru, &pool->lru);
+#endif
+
         spin_unlock(&pool->lock);
 
         return handle;
diff --git a/mm/zswap.c b/mm/zswap.c
index e1e621d0b6a0..30092d9a3b23 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1020,6 +1020,22 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
                 goto fail;
 
         case ZSWAP_SWAPCACHE_NEW: /* page is locked */
+                /*
+                 * Having a local reference to the zswap entry doesn't exclude
+                 * swapping from invalidating and recycling the swap slot. Once
+                 * the swapcache is secured against concurrent swapping to and
+                 * from the slot, recheck that the entry is still current before
+                 * writing.
+                 */
+                spin_lock(&tree->lock);
+                if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
+                        spin_unlock(&tree->lock);
+                        delete_from_swap_cache(page_folio(page));
+                        ret = -ENOMEM;
+                        goto fail;
+                }
+                spin_unlock(&tree->lock);
+
                 /* decompress */
                 acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
                 dlen = PAGE_SIZE;
@@ -1158,9 +1174,16 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
                 goto reject;
         }
 
+        /*
+         * XXX: zswap reclaim does not work with cgroups yet. Without a
+         * cgroup-aware entry LRU, we will push out entries system-wide based on
+         * local cgroup limits.
+         */
         objcg = get_obj_cgroup_from_page(page);
-        if (objcg && !obj_cgroup_may_zswap(objcg))
-                goto shrink;
+        if (objcg && !obj_cgroup_may_zswap(objcg)) {
+                ret = -ENOMEM;
+                goto reject;
+        }
 
         /* reclaim space if needed */
         if (zswap_is_full()) {
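
Note on the mm/filemap.c hunks above: the updated kerneldoc states that a gap is reported by its index, while "no gap found" is signalled by an index outside the scanned range ('return - index >= max_scan'). A minimal caller sketch under that assumption follows; gap_found() is a hypothetical helper for illustration only and is not part of this diff, and the documented wrap-around corner cases are not handled here.

#include <linux/pagemap.h>

/*
 * Hypothetical helper: returns true if page_cache_next_miss() reported a
 * gap inside [index, index + max_scan), per the updated return convention
 * (an out-of-range return means no gap was found).
 */
static bool gap_found(struct address_space *mapping, pgoff_t index,
                      unsigned long max_scan)
{
        pgoff_t gap = page_cache_next_miss(mapping, index, max_scan);

        return gap - index < max_scan;
}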