Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig           |   2
-rw-r--r--  mm/filemap.c         | 278
-rw-r--r--  mm/huge_memory.c     |   3
-rw-r--r--  mm/hugetlb.c         |  29
-rw-r--r--  mm/khugepaged.c      |   4
-rw-r--r--  mm/memfd.c           |   2
-rw-r--r--  mm/memory-failure.c  |   9
-rw-r--r--  mm/mempolicy.c       |   2
-rw-r--r--  mm/migrate.c         |   2
-rw-r--r--  mm/oom_kill.c        |  12
-rw-r--r--  mm/page_alloc.c      |   3
-rw-r--r--  mm/page_idle.c       |   4
-rw-r--r--  mm/page_io.c         |  20
-rw-r--r--  mm/shmem.c           |   2
-rw-r--r--  mm/swap_state.c      |   4
-rw-r--r--  mm/vmalloc.c         |  15
-rw-r--r--  mm/vmscan.c          |  27
17 files changed, 267 insertions, 151 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index f0c76ba47695..ef6efedc5921 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -166,7 +166,7 @@ config MEMORY_HOTPLUG_DEFAULT_ONLINE
	  onlining policy (/sys/devices/system/memory/auto_online_blocks)
	  which determines what happens to newly added memory regions. Policy
	  setting can always be changed at runtime.
-	  See Documentation/memory-hotplug.txt for more information.
+	  See Documentation/admin-guide/mm/memory-hotplug.rst for more information.
 
	  Say Y here if you want all hot-plugged memory blocks to appear in
	  'online' state by default.
diff --git a/mm/filemap.c b/mm/filemap.c
index df2006ba0cfa..f1aa20ab8434 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -281,11 +281,11 @@ EXPORT_SYMBOL(delete_from_page_cache);
  * @pvec: pagevec with pages to delete
  *
  * The function walks over mapping->i_pages and removes pages passed in @pvec
- * from the mapping. The function expects @pvec to be sorted by page index
- * and is optimised for it to be dense.
+ * from the mapping. The function expects @pvec to be sorted by page index.
  * It tolerates holes in @pvec (mapping entries at those indices are not
  * modified). The function expects only THP head pages to be present in the
- * @pvec.
+ * @pvec and takes care to delete all corresponding tail pages from the
+ * mapping as well.
  *
  * The function expects the i_pages lock to be held.
@@ -294,44 +294,40 @@ static void page_cache_delete_batch(struct address_space *mapping,
 {
 	XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index);
 	int total_pages = 0;
-	int i = 0;
+	int i = 0, tail_pages = 0;
 	struct page *page;
 
 	mapping_set_update(&xas, mapping);
 	xas_for_each(&xas, page, ULONG_MAX) {
-		if (i >= pagevec_count(pvec))
+		if (i >= pagevec_count(pvec) && !tail_pages)
 			break;
-
-		/* A swap/dax/shadow entry got inserted? Skip it. */
 		if (xa_is_value(page))
 			continue;
-		/*
-		 * A page got inserted in our range? Skip it. We have our
-		 * pages locked so they are protected from being removed.
-		 * If we see a page whose index is higher than ours, it
-		 * means our page has been removed, which shouldn't be
-		 * possible because we're holding the PageLock.
-		 */
-		if (page != pvec->pages[i]) {
-			VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index,
-					page);
-			continue;
-		}
-
-		WARN_ON_ONCE(!PageLocked(page));
-
-		if (page->index == xas.xa_index)
+		if (!tail_pages) {
+			/*
+			 * Some page got inserted in our range? Skip it. We
+			 * have our pages locked so they are protected from
+			 * being removed.
+			 */
+			if (page != pvec->pages[i]) {
+				VM_BUG_ON_PAGE(page->index >
+						pvec->pages[i]->index, page);
+				continue;
+			}
+			WARN_ON_ONCE(!PageLocked(page));
+			if (PageTransHuge(page) && !PageHuge(page))
+				tail_pages = HPAGE_PMD_NR - 1;
 			page->mapping = NULL;
-		/* Leave page->index set: truncation lookup relies on it */
-
-		/*
-		 * Move to the next page in the vector if this is a regular
-		 * page or the index is of the last sub-page of this compound
-		 * page.
-		 */
-		if (page->index + (1UL << compound_order(page)) - 1 ==
-				xas.xa_index)
+			/*
+			 * Leave page->index set: truncation lookup relies
+			 * upon it
+			 */
 			i++;
+		} else {
+			VM_BUG_ON_PAGE(page->index + HPAGE_PMD_NR - tail_pages
+					!= pvec->pages[i]->index, page);
+			tail_pages--;
+		}
 		xas_store(&xas, NULL);
 		total_pages++;
 	}
@@ -554,6 +550,28 @@ int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
 EXPORT_SYMBOL(filemap_fdatawait_range);
 
 /**
+ * filemap_fdatawait_range_keep_errors - wait for writeback to complete
+ * @mapping: address space structure to wait for
+ * @start_byte: offset in bytes where the range starts
+ * @end_byte: offset in bytes where the range ends (inclusive)
+ *
+ * Walk the list of under-writeback pages of the given address space in the
+ * given range and wait for all of them.  Unlike filemap_fdatawait_range(),
+ * this function does not clear error status of the address space.
+ *
+ * Use this function if callers don't handle errors themselves.  Expected
+ * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
+ * fsfreeze(8)
+ */
+int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
+		loff_t start_byte, loff_t end_byte)
+{
+	__filemap_fdatawait_range(mapping, start_byte, end_byte);
+	return filemap_check_and_keep_errors(mapping);
+}
+EXPORT_SYMBOL(filemap_fdatawait_range_keep_errors);
+
+/**
  * file_fdatawait_range - wait for writeback to complete
  * @file: file pointing to address space structure to wait for
  * @start_byte: offset in bytes where the range starts
@@ -1498,7 +1516,7 @@ EXPORT_SYMBOL(page_cache_prev_miss);
 struct page *find_get_entry(struct address_space *mapping, pgoff_t offset)
 {
 	XA_STATE(xas, &mapping->i_pages, offset);
-	struct page *page;
+	struct page *head, *page;
 
 	rcu_read_lock();
 repeat:
@@ -1513,19 +1531,25 @@ repeat:
 	if (!page || xa_is_value(page))
 		goto out;
 
-	if (!page_cache_get_speculative(page))
+	head = compound_head(page);
+	if (!page_cache_get_speculative(head))
+		goto repeat;
+
+	/* The page was split under us? */
+	if (compound_head(page) != head) {
+		put_page(head);
 		goto repeat;
+	}
 
 	/*
-	 * Has the page moved or been split?
+	 * Has the page moved?
	 * This is part of the lockless pagecache protocol. See
	 * include/linux/pagemap.h for details.
	 */
 	if (unlikely(page != xas_reload(&xas))) {
-		put_page(page);
+		put_page(head);
 		goto repeat;
 	}
-	page = find_subpage(page, offset);
 out:
 	rcu_read_unlock();
@@ -1707,6 +1731,7 @@ unsigned find_get_entries(struct address_space *mapping,
 	rcu_read_lock();
 	xas_for_each(&xas, page, ULONG_MAX) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1717,13 +1742,17 @@ unsigned find_get_entries(struct address_space *mapping,
 		if (xa_is_value(page))
 			goto export;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
-		page = find_subpage(page, xas.xa_index);
 export:
 		indices[ret] = xas.xa_index;
@@ -1732,7 +1761,7 @@ export:
 			break;
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1774,27 +1803,33 @@ unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start,
 	rcu_read_lock();
 	xas_for_each(&xas, page, end) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/* Skip over shadow, swap and DAX entries */
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages) {
 			*start = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1839,6 +1874,7 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 	rcu_read_lock();
 	for (page = xas_load(&xas); page; page = xas_next(&xas)) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1848,19 +1884,24 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index,
 		if (xa_is_value(page))
 			break;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages)
 			break;
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -1896,6 +1937,7 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 	rcu_read_lock();
 	xas_for_each_marked(&xas, page, end, tag) {
+		struct page *head;
 		if (xas_retry(&xas, page))
 			continue;
 		/*
@@ -1906,21 +1948,26 @@ unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index,
 		if (xa_is_value(page))
 			continue;
 
-		if (!page_cache_get_speculative(page))
+		head = compound_head(page);
+		if (!page_cache_get_speculative(head))
 			goto retry;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto put_page;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto put_page;
 
-		pages[ret] = find_subpage(page, xas.xa_index);
+		pages[ret] = page;
 		if (++ret == nr_pages) {
 			*index = xas.xa_index + 1;
 			goto out;
 		}
 		continue;
 put_page:
-		put_page(page);
+		put_page(head);
 retry:
 		xas_reset(&xas);
 	}
@@ -2603,7 +2650,7 @@ void filemap_map_pages(struct vm_fault *vmf,
 	pgoff_t last_pgoff = start_pgoff;
 	unsigned long max_idx;
 	XA_STATE(xas, &mapping->i_pages, start_pgoff);
-	struct page *page;
+	struct page *head, *page;
 
 	rcu_read_lock();
 	xas_for_each(&xas, page, end_pgoff) {
@@ -2612,19 +2659,24 @@ void filemap_map_pages(struct vm_fault *vmf,
 		if (xa_is_value(page))
 			goto next;
 
+		head = compound_head(page);
+
		/*
		 * Check for a locked page first, as a speculative
		 * reference may adversely influence page migration.
		 */
-		if (PageLocked(page))
+		if (PageLocked(head))
 			goto next;
-		if (!page_cache_get_speculative(page))
+		if (!page_cache_get_speculative(head))
 			goto next;
 
-		/* Has the page moved or been split? */
+		/* The page was split under us? */
+		if (compound_head(page) != head)
+			goto skip;
+
+		/* Has the page moved? */
 		if (unlikely(page != xas_reload(&xas)))
 			goto skip;
-		page = find_subpage(page, xas.xa_index);
 		if (!PageUptodate(page) ||
 				PageReadahead(page) ||
@@ -2895,24 +2947,11 @@ EXPORT_SYMBOL(read_cache_page_gfp);
  * LFS limits. If pos is under the limit it becomes a short access. If it
  * exceeds the limit we return -EFBIG.
  */
-static int generic_access_check_limits(struct file *file, loff_t pos,
-				       loff_t *count)
-{
-	struct inode *inode = file->f_mapping->host;
-	loff_t max_size = inode->i_sb->s_maxbytes;
-
-	if (!(file->f_flags & O_LARGEFILE))
-		max_size = MAX_NON_LFS;
-
-	if (unlikely(pos >= max_size))
-		return -EFBIG;
-	*count = min(*count, max_size - pos);
-	return 0;
-}
-
 static int generic_write_check_limits(struct file *file, loff_t pos,
 				      loff_t *count)
 {
+	struct inode *inode = file->f_mapping->host;
+	loff_t max_size = inode->i_sb->s_maxbytes;
 	loff_t limit = rlimit(RLIMIT_FSIZE);
 
 	if (limit != RLIM_INFINITY) {
@@ -2923,7 +2962,15 @@ static int generic_write_check_limits(struct file *file, loff_t pos,
 		*count = min(*count, limit - pos);
 	}
 
-	return generic_access_check_limits(file, pos, count);
+	if (!(file->f_flags & O_LARGEFILE))
+		max_size = MAX_NON_LFS;
+
+	if (unlikely(pos >= max_size))
+		return -EFBIG;
+
+	*count = min(*count, max_size - pos);
+
+	return 0;
 }
 
 /*
@@ -2963,7 +3010,7 @@ EXPORT_SYMBOL(generic_write_checks);
 /*
  * Performs necessary checks before doing a clone.
  *
- * Can adjust amount of bytes to clone.
+ * Can adjust amount of bytes to clone via @req_count argument.
  * Returns appropriate error code that caller should return or
  * zero in case the clone should be allowed.
  */
@@ -3001,10 +3048,6 @@ int generic_remap_checks(struct file *file_in, loff_t pos_in,
 		return -EINVAL;
 	count = min(count, size_in - (uint64_t)pos_in);
 
-	ret = generic_access_check_limits(file_in, pos_in, &count);
-	if (ret)
-		return ret;
-
 	ret = generic_write_check_limits(file_out, pos_out, &count);
 	if (ret)
 		return ret;
@@ -3041,6 +3084,83 @@ int generic_remap_checks(struct file *file_in, loff_t pos_in,
 	return 0;
 }
+
+/*
+ * Performs common checks before doing a file copy/clone
+ * from @file_in to @file_out.
+ */
+int generic_file_rw_checks(struct file *file_in, struct file *file_out)
+{
+	struct inode *inode_in = file_inode(file_in);
+	struct inode *inode_out = file_inode(file_out);
+
+	/* Don't copy dirs, pipes, sockets... */
+	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
+		return -EISDIR;
+	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
+		return -EINVAL;
+
+	if (!(file_in->f_mode & FMODE_READ) ||
+	    !(file_out->f_mode & FMODE_WRITE) ||
+	    (file_out->f_flags & O_APPEND))
+		return -EBADF;
+
+	return 0;
+}
+
+/*
+ * Performs necessary checks before doing a file copy
+ *
+ * Can adjust amount of bytes to copy via @req_count argument.
+ * Returns appropriate error code that caller should return or
+ * zero in case the copy should be allowed.
+ */
+int generic_copy_file_checks(struct file *file_in, loff_t pos_in,
+			     struct file *file_out, loff_t pos_out,
+			     size_t *req_count, unsigned int flags)
+{
+	struct inode *inode_in = file_inode(file_in);
+	struct inode *inode_out = file_inode(file_out);
+	uint64_t count = *req_count;
+	loff_t size_in;
+	int ret;
+
+	ret = generic_file_rw_checks(file_in, file_out);
+	if (ret)
+		return ret;
+
+	/* Don't touch certain kinds of inodes */
+	if (IS_IMMUTABLE(inode_out))
+		return -EPERM;
+
+	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
+		return -ETXTBSY;
+
+	/* Ensure offsets don't wrap. */
+	if (pos_in + count < pos_in || pos_out + count < pos_out)
+		return -EOVERFLOW;
+
+	/* Shorten the copy to EOF */
+	size_in = i_size_read(inode_in);
+	if (pos_in >= size_in)
+		count = 0;
+	else
+		count = min(count, size_in - (uint64_t)pos_in);
+
+	ret = generic_write_check_limits(file_out, pos_out, &count);
+	if (ret)
+		return ret;
+
+	/* Don't allow overlapped copying within the same file. */
+	if (inode_in == inode_out &&
+	    pos_out + count > pos_in &&
+	    pos_out < pos_in + count)
+		return -EINVAL;
+
+	*req_count = count;
+	return 0;
+}
+
 int pagecache_write_begin(struct file *file, struct address_space *mapping,
 				loff_t pos, unsigned len, unsigned flags,
 				struct page **pagep, void **fsdata)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bb8b617e34ed..885642c82aaa 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2496,9 +2496,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
 			if (IS_ENABLED(CONFIG_SHMEM) && PageSwapBacked(head))
 				shmem_uncharge(head->mapping->host, 1);
 			put_page(head + i);
-		} else if (!PageAnon(page)) {
-			__xa_store(&head->mapping->i_pages, head[i].index,
-					head + i, 0);
 		}
 	}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ac843d32b019..ede7e7f5d1ab 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1510,16 +1510,29 @@ static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
 /*
  * Dissolve a given free hugepage into free buddy pages. This function does
- * nothing for in-use (including surplus) hugepages. Returns -EBUSY if the
- * dissolution fails because a give page is not a free hugepage, or because
- * free hugepages are fully reserved.
+ * nothing for in-use hugepages and non-hugepages.
+ * This function returns values like below:
+ *
+ *  -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
+ *          (allocated or reserved.)
+ *  0:      successfully dissolved free hugepages or the page is not a
+ *          hugepage (considered as already dissolved)
  */
 int dissolve_free_huge_page(struct page *page)
 {
 	int rc = -EBUSY;
 
+	/* Not to disrupt normal path by vainly holding hugetlb_lock */
+	if (!PageHuge(page))
+		return 0;
+
 	spin_lock(&hugetlb_lock);
-	if (PageHuge(page) && !page_count(page)) {
+	if (!PageHuge(page)) {
+		rc = 0;
+		goto out;
+	}
+
+	if (!page_count(page)) {
 		struct page *head = compound_head(page);
 		struct hstate *h = page_hstate(head);
 		int nid = page_to_nid(head);
@@ -1564,11 +1577,9 @@ int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
 		page = pfn_to_page(pfn);
-		if (PageHuge(page) && !page_count(page)) {
-			rc = dissolve_free_huge_page(page);
-			if (rc)
-				break;
-		}
+		rc = dissolve_free_huge_page(page);
+		if (rc)
+			break;
 	}
 
 	return rc;
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 0f7419938008..eaaa21b23215 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1378,7 +1378,7 @@ static void collapse_shmem(struct mm_struct *mm,
 				result = SCAN_FAIL;
 				goto xa_locked;
 			}
-			xas_store(&xas, new_page);
+			xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 			nr_none++;
 			continue;
 		}
@@ -1454,7 +1454,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		list_add_tail(&page->lru, &pagelist);
 
 		/* Finally, replace with the new page. */
-		xas_store(&xas, new_page);
+		xas_store(&xas, new_page + (index % HPAGE_PMD_NR));
 		continue;
 out_unlock:
 		unlock_page(page);
diff --git a/mm/memfd.c b/mm/memfd.c
index 2647c898990c..650e65a46b9c 100644
--- a/mm/memfd.c
+++ b/mm/memfd.c
@@ -39,7 +39,6 @@ static void memfd_tag_pins(struct xa_state *xas)
 	xas_for_each(xas, page, ULONG_MAX) {
 		if (xa_is_value(page))
 			continue;
-		page = find_subpage(page, xas->xa_index);
 		if (page_count(page) - page_mapcount(page) > 1)
 			xas_set_mark(xas, MEMFD_TAG_PINNED);
 
@@ -89,7 +88,6 @@ static int memfd_wait_for_pins(struct address_space *mapping)
 			bool clear = true;
 
 			if (xa_is_value(page))
 				continue;
-			page = find_subpage(page, xas.xa_index);
 			if (page_count(page) - page_mapcount(page) != 1) {
 				/*
 				 * On the last scan, we clean up all those tags
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 8da0334b9ca0..f045514d8d20 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -218,7 +218,7 @@ static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
 	if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
 		ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
-				       addr_lsb, current);
+				       addr_lsb);
 	} else {
 		/*
 		 * Don't use force here, it's convenient if the signal
@@ -1730,6 +1730,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
 		if (!ret) {
 			if (set_hwpoison_free_buddy_page(page))
 				num_poisoned_pages_inc();
+			else
+				ret = -EBUSY;
 		}
 	}
 	return ret;
@@ -1854,11 +1856,8 @@ static int soft_offline_in_use_page(struct page *page, int flags)
 
 static int soft_offline_free_page(struct page *page)
 {
-	int rc = 0;
-	struct page *head = compound_head(page);
+	int rc = dissolve_free_huge_page(page);
 
-	if (PageHuge(head))
-		rc = dissolve_free_huge_page(page);
 	if (!rc) {
 		if (set_hwpoison_free_buddy_page(page))
 			num_poisoned_pages_inc();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 01600d80ae01..fdcb73536319 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -306,7 +306,7 @@ static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes)
 	else {
 		nodes_remap(tmp, pol->v.nodes,pol->w.cpuset_mems_allowed,
 								*nodes);
-		pol->w.cpuset_mems_allowed = tmp;
+		pol->w.cpuset_mems_allowed = *nodes;
 	}
 
 	if (nodes_empty(tmp))
diff --git a/mm/migrate.c b/mm/migrate.c
index f2ecc2855a12..e9594bc0d406 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -463,7 +463,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		for (i = 1; i < HPAGE_PMD_NR; i++) {
 			xas_next(&xas);
-			xas_store(&xas, newpage);
+			xas_store(&xas, newpage + i);
 		}
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 5a58778c91d4..f719b64741d6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -987,8 +987,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
 /*
  * Determines whether the kernel must panic because of the panic_on_oom sysctl.
  */
-static void check_panic_on_oom(struct oom_control *oc,
-			       enum oom_constraint constraint)
+static void check_panic_on_oom(struct oom_control *oc)
 {
 	if (likely(!sysctl_panic_on_oom))
 		return;
@@ -998,7 +997,7 @@ static void check_panic_on_oom(struct oom_control *oc,
		 * does not panic for cpuset, mempolicy, or memcg allocation
		 * failures.
		 */
-		if (constraint != CONSTRAINT_NONE)
+		if (oc->constraint != CONSTRAINT_NONE)
 			return;
 	}
 	/* Do not panic for oom kills triggered by sysrq */
@@ -1035,7 +1034,6 @@ EXPORT_SYMBOL_GPL(unregister_oom_notifier);
 bool out_of_memory(struct oom_control *oc)
 {
 	unsigned long freed = 0;
-	enum oom_constraint constraint = CONSTRAINT_NONE;
 
 	if (oom_killer_disabled)
 		return false;
@@ -1071,10 +1069,10 @@ bool out_of_memory(struct oom_control *oc)
	 * Check if there were limitations on the allocation (only relevant for
	 * NUMA and memcg) that may require different handling.
	 */
-	constraint = constrained_alloc(oc);
-	if (constraint != CONSTRAINT_MEMORY_POLICY)
+	oc->constraint = constrained_alloc(oc);
+	if (oc->constraint != CONSTRAINT_MEMORY_POLICY)
 		oc->nodemask = NULL;
-	check_panic_on_oom(oc, constraint);
+	check_panic_on_oom(oc);
 
 	if (!is_memcg_oom(oc) && sysctl_oom_kill_allocating_task &&
 	    current->mm && !oom_unkillable_task(current, NULL, oc->nodemask) &&
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d66bc8abe0af..8e3bc949ebcc 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1826,7 +1826,8 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
 						     first_deferred_pfn)) {
 		pgdat->first_deferred_pfn = ULONG_MAX;
 		pgdat_resize_unlock(pgdat, &flags);
-		return true;
+		/* Retry only once. */
+		return first_deferred_pfn != ULONG_MAX;
 	}
 
 	/*
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 0b39ec0c945c..295512465065 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -136,7 +136,7 @@ static ssize_t page_idle_bitmap_read(struct file *file, struct kobject *kobj,
 	end_pfn = pfn + count * BITS_PER_BYTE;
 	if (end_pfn > max_pfn)
-		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+		end_pfn = max_pfn;
 
 	for (; pfn < end_pfn; pfn++) {
 		bit = pfn % BITMAP_CHUNK_BITS;
@@ -181,7 +181,7 @@ static ssize_t page_idle_bitmap_write(struct file *file, struct kobject *kobj,
 	end_pfn = pfn + count * BITS_PER_BYTE;
 	if (end_pfn > max_pfn)
-		end_pfn = ALIGN(max_pfn, BITMAP_CHUNK_BITS);
+		end_pfn = max_pfn;
 
 	for (; pfn < end_pfn; pfn++) {
 		bit = pfn % BITMAP_CHUNK_BITS;
diff --git a/mm/page_io.c b/mm/page_io.c
index 2e8019d0e048..a39aac2f8c8d 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -29,10 +29,9 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 		struct page *page, bio_end_io_t end_io)
 {
-	int i, nr = hpage_nr_pages(page);
 	struct bio *bio;
 
-	bio = bio_alloc(gfp_flags, nr);
+	bio = bio_alloc(gfp_flags, 1);
 	if (bio) {
 		struct block_device *bdev;
 
@@ -41,9 +40,7 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_end_io = end_io;
-		for (i = 0; i < nr; i++)
-			bio_add_page(bio, page + i, PAGE_SIZE, 0);
-		VM_BUG_ON(bio->bi_iter.bi_size != PAGE_SIZE * nr);
+		bio_add_page(bio, page, PAGE_SIZE * hpage_nr_pages(page), 0);
 	}
 	return bio;
 }
@@ -140,8 +137,10 @@ out:
 	unlock_page(page);
 	WRITE_ONCE(bio->bi_private, NULL);
 	bio_put(bio);
-	blk_wake_io_task(waiter);
-	put_task_struct(waiter);
+	if (waiter) {
+		blk_wake_io_task(waiter);
+		put_task_struct(waiter);
+	}
 }
 
 int generic_swapfile_activate(struct swap_info_struct *sis,
@@ -398,11 +397,12 @@ int swap_readpage(struct page *page, bool synchronous)
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
-	get_task_struct(current);
-	bio->bi_private = current;
 	bio_set_op_attrs(bio, REQ_OP_READ, 0);
-	if (synchronous)
+	if (synchronous) {
 		bio->bi_opf |= REQ_HIPRI;
+		get_task_struct(current);
+		bio->bi_private = current;
+	}
 	count_vm_event(PSWPIN);
 	bio_get(bio);
 	qc = submit_bio(bio);
diff --git a/mm/shmem.c b/mm/shmem.c
index 1bb3b8dc8bb2..f4dce9c8670d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -614,7 +614,7 @@ static int shmem_add_to_page_cache(struct page *page,
 		if (xas_error(&xas))
 			goto unlock;
 next:
-		xas_store(&xas, page);
+		xas_store(&xas, page + i);
 		if (++i < nr) {
 			xas_next(&xas);
 			goto next;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index eb714165afd2..85245fdec8d9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -132,7 +132,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp)
 		for (i = 0; i < nr; i++) {
 			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
 			set_page_private(page + i, entry.val + i);
-			xas_store(&xas, page);
+			xas_store(&xas, page + i);
 			xas_next(&xas);
 		}
 		address_space->nrpages += nr;
@@ -167,7 +167,7 @@ void __delete_from_swap_cache(struct page *page, swp_entry_t entry)
 	for (i = 0; i < nr; i++) {
 		void *entry = xas_store(&xas, NULL);
-		VM_BUG_ON_PAGE(entry != page, entry);
+		VM_BUG_ON_PAGE(entry != page + i, entry);
 		set_page_private(page + i, 0);
 		xas_next(&xas);
 	}
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4c9e150e5ad3..030a544e6602 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -913,7 +913,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 	unsigned long nva_start_addr, unsigned long size,
 	enum fit_type type)
 {
-	struct vmap_area *lva;
+	struct vmap_area *lva = NULL;
 
 	if (type == FL_FIT_TYPE) {
 		/*
@@ -972,7 +972,7 @@ adjust_va_to_fit_type(struct vmap_area *va,
 	if (type != FL_FIT_TYPE) {
 		augment_tree_propagate_from(va);
 
-		if (type == NE_FIT_TYPE)
+		if (lva)	/* type == NE_FIT_TYPE */
 			insert_vmap_area_augment(lva, &va->rb_node,
 				&free_vmap_area_root, &free_vmap_area_list);
 	}
@@ -2128,17 +2128,6 @@ static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
 	int flush_dmap = 0;
 	int i;
 
-	/*
-	 * The below block can be removed when all architectures that have
-	 * direct map permissions also have set_direct_map_() implementations.
-	 * This is concerned with resetting the direct map any an vm alias with
-	 * execute permissions, without leaving a RW+X window.
-	 */
-	if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
-		set_memory_nx((unsigned long)area->addr, area->nr_pages);
-		set_memory_rw((unsigned long)area->addr, area->nr_pages);
-	}
-
 	remove_vm_area(area->addr);
 
 	/* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7889f583ced9..910e02c793ff 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -3644,19 +3644,18 @@ out:
 }
 
 /*
- * pgdat->kswapd_classzone_idx is the highest zone index that a recent
- * allocation request woke kswapd for. When kswapd has not woken recently,
- * the value is MAX_NR_ZONES which is not a valid index. This compares a
- * given classzone and returns it or the highest classzone index kswapd
- * was recently woke for.
+ * The pgdat->kswapd_classzone_idx is used to pass the highest zone index to be
+ * reclaimed by kswapd from the waker. If the value is MAX_NR_ZONES which is not
+ * a valid index then either kswapd runs for first time or kswapd couldn't sleep
+ * after previous reclaim attempt (node is still unbalanced). In that case
+ * return the zone index of the previous kswapd reclaim cycle.
  */
 static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
-					   enum zone_type classzone_idx)
+					   enum zone_type prev_classzone_idx)
 {
 	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
-		return classzone_idx;
-
-	return max(pgdat->kswapd_classzone_idx, classzone_idx);
+		return prev_classzone_idx;
+	return pgdat->kswapd_classzone_idx;
 }
 
 static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
@@ -3797,7 +3796,7 @@ kswapd_try_sleep:
 		/* Read the new order and classzone_idx */
 		alloc_order = reclaim_order = pgdat->kswapd_order;
-		classzone_idx = kswapd_classzone_idx(pgdat, 0);
+		classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
 		pgdat->kswapd_order = 0;
 		pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
 
@@ -3851,8 +3850,12 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
 	if (!cpuset_zone_allowed(zone, gfp_flags))
 		return;
 	pgdat = zone->zone_pgdat;
-	pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat,
-							   classzone_idx);
+
+	if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+		pgdat->kswapd_classzone_idx = classzone_idx;
+	else
+		pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
+						  classzone_idx);
 	pgdat->kswapd_order = max(pgdat->kswapd_order, order);
 	if (!waitqueue_active(&pgdat->kswapd_wait))
 		return;
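
Editorial note, not part of the commit above: the newly exported filemap_fdatawait_range_keep_errors() is aimed at sync(2)/fsfreeze-style flushers that must not consume the mapping's error state. A minimal, hypothetical sketch of such a caller follows; the sync_one_mapping() helper and the 0..LLONG_MAX range are illustrative assumptions, not taken from this diff.

/* Illustrative sketch only -- not part of the patch above. */
static void sync_one_mapping(struct address_space *mapping)
{
	/* Kick off writeback for the whole file without waiting. */
	filemap_fdatawrite(mapping);

	/*
	 * Wait for writeback to finish but keep AS_EIO/AS_ENOSPC set on the
	 * mapping, so a later fsync() from userspace still observes the
	 * error; filemap_fdatawait_range() would clear it here instead.
	 */
	filemap_fdatawait_range_keep_errors(mapping, 0, LLONG_MAX);
}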