Diffstat (limited to 'mm')
-rw-r--r--  mm/Makefile           |  1
-rw-r--r--  mm/backing-dev.c      | 20
-rw-r--r--  mm/bootmem.c          |  1
-rw-r--r--  mm/cma.h              |  1
-rw-r--r--  mm/cma_debug.c        |  1
-rw-r--r--  mm/compaction.c       |  1
-rw-r--r--  mm/debug.c            |  1
-rw-r--r--  mm/debug_page_ref.c   |  1
-rw-r--r--  mm/early_ioremap.c    |  1
-rw-r--r--  mm/fadvise.c          |  1
-rw-r--r--  mm/failslab.c         |  1
-rw-r--r--  mm/frame_vector.c     |  1
-rw-r--r--  mm/gup.c              | 97
-rw-r--r--  mm/highmem.c          |  1
-rw-r--r--  mm/huge_memory.c      | 13
-rw-r--r--  mm/hugetlb.c          | 32
-rw-r--r--  mm/init-mm.c          |  1
-rw-r--r--  mm/kasan/Makefile     |  1
-rw-r--r--  mm/kasan/kasan.h      |  1
-rw-r--r--  mm/khugepaged.c       |  1
-rw-r--r--  mm/kmemcheck.c        |  1
-rw-r--r--  mm/madvise.c          |  1
-rw-r--r--  mm/memcontrol.c       | 15
-rw-r--r--  mm/memory.c           |  6
-rw-r--r--  mm/mempool.c          |  1
-rw-r--r--  mm/memtest.c          |  1
-rw-r--r--  mm/migrate.c          |  1
-rw-r--r--  mm/mincore.c          |  1
-rw-r--r--  mm/mlock.c            |  1
-rw-r--r--  mm/mmzone.c           |  1
-rw-r--r--  mm/mprotect.c         |  1
-rw-r--r--  mm/mremap.c           |  1
-rw-r--r--  mm/msync.c            |  1
-rw-r--r--  mm/nobootmem.c        |  1
-rw-r--r--  mm/page-writeback.c   | 36
-rw-r--r--  mm/page_counter.c     |  1
-rw-r--r--  mm/page_ext.c         |  1
-rw-r--r--  mm/page_idle.c        |  1
-rw-r--r--  mm/page_io.c          |  3
-rw-r--r--  mm/page_isolation.c   |  1
-rw-r--r--  mm/page_owner.c       |  1
-rw-r--r--  mm/page_poison.c      |  1
-rw-r--r--  mm/page_vma_mapped.c  |  1
-rw-r--r--  mm/pagewalk.c         |  7
-rw-r--r--  mm/percpu-internal.h  |  1
-rw-r--r--  mm/percpu.c           | 18
-rw-r--r--  mm/pgtable-generic.c  |  1
-rw-r--r--  mm/quicklist.c        |  1
-rw-r--r--  mm/slab.c             |  1
-rw-r--r--  mm/slab.h             |  3
-rw-r--r--  mm/slab_common.c      |  1
-rw-r--r--  mm/slob.c             |  1
-rw-r--r--  mm/slub.c             |  1
-rw-r--r--  mm/sparse-vmemmap.c   |  1
-rw-r--r--  mm/sparse.c           | 28
-rw-r--r--  mm/swap_cgroup.c      |  1
-rw-r--r--  mm/swap_slots.c       |  1
-rw-r--r--  mm/swap_state.c       |  1
-rw-r--r--  mm/swapfile.c         | 23
-rw-r--r--  mm/vmacache.c         |  1
-rw-r--r--  mm/vmscan.c           |  3
-rw-r--r--  mm/workingset.c       |  1
62 files changed, 228 insertions, 124 deletions
diff --git a/mm/Makefile b/mm/Makefile
index e3ac3aeb533b..4659b93cba43 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
#
# Makefile for the linux memory manager.
#
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index e19606bb41a0..74b52dfd5852 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -1072,23 +1072,3 @@ out:
return ret;
}
EXPORT_SYMBOL(wait_iff_congested);
-
-int pdflush_proc_obsolete(struct ctl_table *table, int write,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- char kbuf[] = "0\n";
-
- if (*ppos || *lenp < sizeof(kbuf)) {
- *lenp = 0;
- return 0;
- }
-
- if (copy_to_user(buffer, kbuf, sizeof(kbuf)))
- return -EFAULT;
- pr_warn_once("%s exported in /proc is scheduled for removal\n",
- table->procname);
-
- *lenp = 2;
- *ppos += *lenp;
- return 2;
-}
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 9fedb27c6451..6aef64254203 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bootmem - A boot-time physical memory allocator and configurator
*
diff --git a/mm/cma.h b/mm/cma.h
index 49861286279d..33c0b517733c 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_CMA_H__
#define __MM_CMA_H__
diff --git a/mm/cma_debug.c b/mm/cma_debug.c
index c03ccbc405a0..275df8b5b22e 100644
--- a/mm/cma_debug.c
+++ b/mm/cma_debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* CMA DebugFS Interface
*
diff --git a/mm/compaction.c b/mm/compaction.c
index 03d31a875341..85395dc6eb13 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/compaction.c
*
diff --git a/mm/debug.c b/mm/debug.c
index 5715448ab0b5..6726bec731c9 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mm/debug.c
*
diff --git a/mm/debug_page_ref.c b/mm/debug_page_ref.c
index 1aef3d562e52..f3b2c9d3ece2 100644
--- a/mm/debug_page_ref.c
+++ b/mm/debug_page_ref.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
#include <linux/tracepoint.h>
diff --git a/mm/early_ioremap.c b/mm/early_ioremap.c
index b1dd4a948fc0..d04ac1ec0559 100644
--- a/mm/early_ioremap.c
+++ b/mm/early_ioremap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Provide common bits of early_ioremap() support for architectures needing
* temporary mappings during boot before ioremap() is available.
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 702f239cd6db..ec70d6e4b86d 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mm/fadvise.c
*
diff --git a/mm/failslab.c b/mm/failslab.c
index b0fac98cd938..8087d976a809 100644
--- a/mm/failslab.c
+++ b/mm/failslab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/fault-inject.h>
#include <linux/slab.h>
#include <linux/mm.h>
diff --git a/mm/frame_vector.c b/mm/frame_vector.c
index 72ebec18629c..2f98df0d460e 100644
--- a/mm/frame_vector.c
+++ b/mm/frame_vector.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
diff --git a/mm/gup.c b/mm/gup.c
index b2b4d4263768..dfcde13f289a 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1643,6 +1643,47 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
return 1;
}
+static void gup_pgd_range(unsigned long addr, unsigned long end,
+ int write, struct page **pages, int *nr)
+{
+ unsigned long next;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset(current->mm, addr);
+ do {
+ pgd_t pgd = READ_ONCE(*pgdp);
+
+ next = pgd_addr_end(addr, end);
+ if (pgd_none(pgd))
+ return;
+ if (unlikely(pgd_huge(pgd))) {
+ if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
+ pages, nr))
+ return;
+ } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
+ if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
+ PGDIR_SHIFT, next, write, pages, nr))
+ return;
+ } else if (!gup_p4d_range(pgd, addr, next, write, pages, nr))
+ return;
+ } while (pgdp++, addr = next, addr != end);
+}
+
+#ifndef gup_fast_permitted
+/*
+ * Check if it's allowed to use __get_user_pages_fast() for the range, or
+ * we need to fall back to the slow version:
+ */
+bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
+{
+ unsigned long len, end;
+
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+ return end >= start;
+}
+#endif
+
/*
* Like get_user_pages_fast() except it's IRQ-safe in that it won't fall back to
* the regular GUP. It will only return non-negative values.
@@ -1650,10 +1691,8 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end,
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
- struct mm_struct *mm = current->mm;
unsigned long addr, len, end;
- unsigned long next, flags;
- pgd_t *pgdp;
+ unsigned long flags;
int nr = 0;
start &= PAGE_MASK;
@@ -1677,45 +1716,15 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
* block IPIs that come from THPs splitting.
*/
- local_irq_save(flags);
- pgdp = pgd_offset(mm, addr);
- do {
- pgd_t pgd = READ_ONCE(*pgdp);
-
- next = pgd_addr_end(addr, end);
- if (pgd_none(pgd))
- break;
- if (unlikely(pgd_huge(pgd))) {
- if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
- pages, &nr))
- break;
- } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
- if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
- PGDIR_SHIFT, next, write, pages, &nr))
- break;
- } else if (!gup_p4d_range(pgd, addr, next, write, pages, &nr))
- break;
- } while (pgdp++, addr = next, addr != end);
- local_irq_restore(flags);
+ if (gup_fast_permitted(start, nr_pages, write)) {
+ local_irq_save(flags);
+ gup_pgd_range(addr, end, write, pages, &nr);
+ local_irq_restore(flags);
+ }
return nr;
}
-#ifndef gup_fast_permitted
-/*
- * Check if it's allowed to use __get_user_pages_fast() for the range, or
- * we need to fall back to the slow version:
- */
-bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
-{
- unsigned long len, end;
-
- len = (unsigned long) nr_pages << PAGE_SHIFT;
- end = start + len;
- return end >= start;
-}
-#endif
-
/**
* get_user_pages_fast() - pin user pages in memory
* @start: starting user address
@@ -1735,12 +1744,22 @@ bool gup_fast_permitted(unsigned long start, int nr_pages, int write)
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
struct page **pages)
{
+ unsigned long addr, len, end;
int nr = 0, ret = 0;
start &= PAGE_MASK;
+ addr = start;
+ len = (unsigned long) nr_pages << PAGE_SHIFT;
+ end = start + len;
+
+ if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
+ (void __user *)start, len)))
+ return 0;
if (gup_fast_permitted(start, nr_pages, write)) {
- nr = __get_user_pages_fast(start, nr_pages, write, pages);
+ local_irq_disable();
+ gup_pgd_range(addr, end, write, pages, &nr);
+ local_irq_enable();
ret = nr;
}
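Illustrative aside, not part of the commit: a minimal sketch of how a caller might use get_user_pages_fast() as reworked above; the helper name my_pin_user_buffer() and its error-handling policy are hypothetical.
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
/* Pin a user buffer; returns the number of pages pinned or a -errno. */
static int my_pin_user_buffer(unsigned long uaddr, size_t len, int write,
			      struct page ***pagesp)
{
	int nr_pages = DIV_ROUND_UP(len + (uaddr & ~PAGE_MASK), PAGE_SIZE);
	struct page **pages;
	int pinned, i;
	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;
	/* May fall back internally to the slow GUP path. */
	pinned = get_user_pages_fast(uaddr & PAGE_MASK, nr_pages, write, pages);
	if (pinned == nr_pages) {
		*pagesp = pages;
		return pinned;
	}
	/* Partial or failed pin: drop whatever was pinned. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	kfree(pages);
	return pinned < 0 ? pinned : -EFAULT;
}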
diff --git a/mm/highmem.c b/mm/highmem.c
index 50b4ca6787f0..59db3223a5d6 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* High memory handling common code and variables.
*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 269b5df58543..003f7bcd0952 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -39,10 +39,10 @@
#include "internal.h"
/*
- * By default transparent hugepage support is disabled in order that avoid
- * to risk increase the memory footprint of applications without a guaranteed
- * benefit. When transparent hugepage support is enabled, is for all mappings,
- * and khugepaged scans all mappings.
+ * By default, transparent hugepage support is disabled in order to avoid
+ * risking an increased memory footprint for applications that are not
+ * guaranteed to benefit from it. When transparent hugepage support is
+ * enabled, it is for all mappings, and khugepaged scans all mappings.
* Defrag is invoked by khugepaged hugepage allocations and by page faults
* for all hugepage allocations.
*/
@@ -941,6 +941,9 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd = pmd_swp_mksoft_dirty(pmd);
set_pmd_at(src_mm, addr, src_pmd, pmd);
}
+ add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ atomic_long_inc(&dst_mm->nr_ptes);
+ pgtable_trans_huge_deposit(dst_mm, dst_pmd, pgtable);
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
ret = 0;
goto out_unlock;
@@ -2715,7 +2718,7 @@ static unsigned long deferred_split_count(struct shrinker *shrink,
struct shrink_control *sc)
{
struct pglist_data *pgdata = NODE_DATA(sc->nid);
- return ACCESS_ONCE(pgdata->split_queue_len);
+ return READ_ONCE(pgdata->split_queue_len);
}
static unsigned long deferred_split_scan(struct shrinker *shrink,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 424b0ef08a60..2d2ff5e8bf2b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3984,6 +3984,9 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
unsigned long src_addr,
struct page **pagep)
{
+ struct address_space *mapping;
+ pgoff_t idx;
+ unsigned long size;
int vm_shared = dst_vma->vm_flags & VM_SHARED;
struct hstate *h = hstate_vma(dst_vma);
pte_t _dst_pte;
@@ -4021,13 +4024,24 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
__SetPageUptodate(page);
set_page_huge_active(page);
+ mapping = dst_vma->vm_file->f_mapping;
+ idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+
/*
* If shared, add to page cache
*/
if (vm_shared) {
- struct address_space *mapping = dst_vma->vm_file->f_mapping;
- pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ ret = -EFAULT;
+ if (idx >= size)
+ goto out_release_nounlock;
+ /*
+ * Serialization between remove_inode_hugepages() and
+ * huge_add_to_page_cache() below happens through the
+	 * hugetlb_fault_mutex_table, which here must be held
+	 * by the caller.
+ */
ret = huge_add_to_page_cache(page, mapping, idx);
if (ret)
goto out_release_nounlock;
@@ -4036,6 +4050,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
spin_lock(ptl);
+ /*
+ * Recheck the i_size after holding PT lock to make sure not
+ * to leave any page mapped (as page_mapped()) beyond the end
+ * of the i_size (remove_inode_hugepages() is strict about
+ * enforcing that). If we bail out here, we'll also leave a
+ * page in the radix tree in the vm_shared case beyond the end
+ * of the i_size, but remove_inode_hugepages() will take care
+ * of it as soon as we drop the hugetlb_fault_mutex_table.
+ */
+ size = i_size_read(mapping->host) >> huge_page_shift(h);
+ ret = -EFAULT;
+ if (idx >= size)
+ goto out_release_unlock;
+
ret = -EEXIST;
if (!huge_pte_none(huge_ptep_get(dst_pte)))
goto out_release_unlock;
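Illustrative aside, not part of the commit: hugetlb_mcopy_atomic_pte() is reached from userspace through the userfaultfd UFFDIO_COPY ioctl on a hugetlbfs-backed range. A minimal sketch of that call, assuming uffd is an already created and registered userfaultfd descriptor; resolve_hugetlb_fault() is a hypothetical helper.
#include <linux/userfaultfd.h>
#include <sys/ioctl.h>
#include <errno.h>
/* dst and len must be aligned to and a multiple of the huge page size. */
static int resolve_hugetlb_fault(int uffd, unsigned long dst,
				 unsigned long src, unsigned long len)
{
	struct uffdio_copy copy = {
		.dst	= dst,	/* faulting address reported by the uffd */
		.src	= src,	/* source buffer in our own address space */
		.len	= len,
		.mode	= 0,
	};
	if (ioctl(uffd, UFFDIO_COPY, &copy) == -1)
		return -errno;
	return 0;
}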
diff --git a/mm/init-mm.c b/mm/init-mm.c
index 975e49f00f34..f94d5d15ebc0 100644
--- a/mm/init-mm.c
+++ b/mm/init-mm.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index 2976a9ee104f..3289db38bc87 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -1,3 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
KASAN_SANITIZE := n
UBSAN_SANITIZE_kasan.o := n
KCOV_INSTRUMENT := n
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 1229298cce64..c70851a9a6a4 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index c01f177a1120..43cb3043311b 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/mm.h>
diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c
index 2d5959c5f7c5..800d64b854ea 100644
--- a/mm/kmemcheck.c
+++ b/mm/kmemcheck.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
diff --git a/mm/madvise.c b/mm/madvise.c
index fd70d6aabc3e..375cf32087e4 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/madvise.c
*
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d5f3a62887cf..661f046ad318 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5828,21 +5828,6 @@ void mem_cgroup_sk_alloc(struct sock *sk)
if (!mem_cgroup_sockets_enabled)
return;
- /*
- * Socket cloning can throw us here with sk_memcg already
- * filled. It won't however, necessarily happen from
- * process context. So the test for root memcg given
- * the current task's memcg won't help us in this case.
- *
- * Respecting the original socket's memcg is a better
- * decision in this case.
- */
- if (sk->sk_memcg) {
- BUG_ON(mem_cgroup_is_root(sk->sk_memcg));
- css_get(&sk->sk_memcg->css);
- return;
- }
-
rcu_read_lock();
memcg = mem_cgroup_from_task(current);
if (memcg == root_mem_cgroup)
diff --git a/mm/memory.c b/mm/memory.c
index a728bed16c20..cae514e7dcfc 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3891,9 +3891,9 @@ static int handle_pte_fault(struct vm_fault *vmf)
/*
* some architectures can have larger ptes than wordsize,
* e.g.ppc44x-defconfig has CONFIG_PTE_64BIT=y and
- * CONFIG_32BIT=y, so READ_ONCE or ACCESS_ONCE cannot guarantee
- * atomic accesses. The code below just needs a consistent
- * view for the ifs and we later double check anyway with the
+ * CONFIG_32BIT=y, so READ_ONCE cannot guarantee atomic
+ * accesses. The code below just needs a consistent view
+ * for the ifs and we later double check anyway with the
* ptl lock held. So here a barrier will do.
*/
barrier();
diff --git a/mm/mempool.c b/mm/mempool.c
index 1c0294858527..c4a23cdae3f0 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/mempool.c
*
diff --git a/mm/memtest.c b/mm/memtest.c
index 8eaa4c3a5f65..f53ace709ccd 100644
--- a/mm/memtest.c
+++ b/mm/memtest.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
diff --git a/mm/migrate.c b/mm/migrate.c
index e00814ca390e..1236449b4777 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Memory Migration functionality - linux/mm/migrate.c
*
diff --git a/mm/mincore.c b/mm/mincore.c
index c5687c45c326..fc37afe226e6 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/mincore.c
*
diff --git a/mm/mlock.c b/mm/mlock.c
index dfc6f1912176..46af369c13e5 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/mlock.c
*
diff --git a/mm/mmzone.c b/mm/mmzone.c
index a51c0a67ea3d..4686fdc23bb9 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/mmzone.c
*
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 6d3e2f082290..ec39f730a0bf 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mm/mprotect.c
*
diff --git a/mm/mremap.c b/mm/mremap.c
index cfec004c4ff9..049470aa1e3e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mm/mremap.c
*
diff --git a/mm/msync.c b/mm/msync.c
index 24e612fefa04..ef30a429623a 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/msync.c
*
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 3637809a18d0..9b02fda0886b 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* bootmem - A boot-time physical memory allocator and configurator
*
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0b9c5cbe8eba..c518c845f202 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1972,31 +1972,31 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
- proc_dointvec(table, write, buffer, length, ppos);
- return 0;
+ unsigned int old_interval = dirty_writeback_interval;
+ int ret;
+
+ ret = proc_dointvec(table, write, buffer, length, ppos);
+
+ /*
+ * Writing 0 to dirty_writeback_interval will disable periodic writeback
+ * and a different non-zero value will wakeup the writeback threads.
+ * wb_wakeup_delayed() would be more appropriate, but it's a pain to
+ * iterate over all bdis and wbs.
+ * The reason we do this is to make the change take effect immediately.
+ */
+ if (!ret && write && dirty_writeback_interval &&
+ dirty_writeback_interval != old_interval)
+ wakeup_flusher_threads(WB_REASON_PERIODIC);
+
+ return ret;
}
#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
struct request_queue *q = (struct request_queue *)data;
- int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
- global_node_page_state(NR_UNSTABLE_NFS);
- struct bdi_writeback *wb;
- /*
- * We want to write everything out, not just down to the dirty
- * threshold
- */
- if (!bdi_has_dirty_io(q->backing_dev_info))
- return;
-
- rcu_read_lock();
- list_for_each_entry_rcu(wb, &q->backing_dev_info->wb_list, bdi_node)
- if (wb_has_dirty_io(wb))
- wb_start_writeback(wb, nr_pages, true,
- WB_REASON_LAPTOP_TIMER);
- rcu_read_unlock();
+ wakeup_flusher_threads_bdi(q->backing_dev_info, WB_REASON_LAPTOP_TIMER);
}
/*
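Illustrative aside, not part of the commit: the same "only act when the written value actually changed" pattern used by dirty_writeback_centisecs_handler() above, applied to a hypothetical sysctl my_interval with a hypothetical handler my_interval_handler().
#include <linux/kernel.h>
#include <linux/sysctl.h>
static unsigned int my_interval;
static int my_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	unsigned int old = my_interval;
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);
	/* React only on a successful write that changed the value. */
	if (!ret && write && my_interval && my_interval != old)
		pr_info("my_interval changed from %u to %u\n", old, my_interval);
	return ret;
}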
diff --git a/mm/page_counter.c b/mm/page_counter.c
index 7c6a63d2c27f..2a8df3ad60a4 100644
--- a/mm/page_counter.c
+++ b/mm/page_counter.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Lockless hierarchical page accounting & limiting
*
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 32f18911deda..4f0367d472c4 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
diff --git a/mm/page_idle.c b/mm/page_idle.c
index 4bd03a8d809e..0a49374e6931 100644
--- a/mm/page_idle.c
+++ b/mm/page_idle.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
diff --git a/mm/page_io.c b/mm/page_io.c
index 21502d341a67..cd52b9cc169b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/page_io.c
*
@@ -407,7 +408,7 @@ int swap_readpage(struct page *page, bool do_poll)
if (!READ_ONCE(bio->bi_private))
break;
- if (!blk_mq_poll(disk->queue, qc))
+ if (!blk_poll(disk->queue, qc))
break;
}
__set_current_state(TASK_RUNNING);
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 757410d9f758..44f213935bf6 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/page_isolation.c
*/
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 57abca62d4db..4f44b95b9d1e 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/debugfs.h>
#include <linux/mm.h>
#include <linux/slab.h>
diff --git a/mm/page_poison.c b/mm/page_poison.c
index be19e989ccff..e83fd44867de 100644
--- a/mm/page_poison.c
+++ b/mm/page_poison.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
diff --git a/mm/page_vma_mapped.c b/mm/page_vma_mapped.c
index 53afbb919a1c..d22b84310f6d 100644
--- a/mm/page_vma_mapped.c
+++ b/mm/page_vma_mapped.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 1a4197965415..23a3e415ac2c 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/sched.h>
@@ -187,8 +188,12 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
do {
next = hugetlb_entry_end(h, addr, end);
pte = huge_pte_offset(walk->mm, addr & hmask, sz);
- if (pte && walk->hugetlb_entry)
+
+ if (pte)
err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
+ else if (walk->pte_hole)
+ err = walk->pte_hole(addr, next, walk);
+
if (err)
break;
} while (addr = next, addr != end);
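Illustrative aside, not part of the commit: with the change above, a walker that supplies both hugetlb_entry and pte_hole sees unmapped hugetlb ranges reported as holes instead of being silently skipped. A minimal sketch; the struct and callback names are hypothetical, and mmap_sem must be held for read around walk_page_range().
#include <linux/mm.h>
struct my_walk_stats {
	unsigned long mapped;	/* pages backed by a huge pte */
	unsigned long holes;	/* pages in unmapped (hole) ranges */
};
static int my_hugetlb_entry(pte_t *pte, unsigned long hmask,
			    unsigned long addr, unsigned long next,
			    struct mm_walk *walk)
{
	struct my_walk_stats *stats = walk->private;
	stats->mapped += (next - addr) >> PAGE_SHIFT;
	return 0;
}
static int my_pte_hole(unsigned long addr, unsigned long next,
		       struct mm_walk *walk)
{
	struct my_walk_stats *stats = walk->private;
	stats->holes += (next - addr) >> PAGE_SHIFT;
	return 0;
}
static void my_walk(struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct my_walk_stats *stats)
{
	struct mm_walk walk = {
		.hugetlb_entry	= my_hugetlb_entry,
		.pte_hole	= my_pte_hole,
		.mm		= mm,
		.private	= stats,
	};
	walk_page_range(start, end, &walk);
}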
diff --git a/mm/percpu-internal.h b/mm/percpu-internal.h
index 7065faf74b46..b1739dc06b73 100644
--- a/mm/percpu-internal.h
+++ b/mm/percpu-internal.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _MM_PERCPU_INTERNAL_H
#define _MM_PERCPU_INTERNAL_H
diff --git a/mm/percpu.c b/mm/percpu.c
index aa121cef76de..79e3549cab0f 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1329,7 +1329,9 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
* @gfp: allocation flags
*
* Allocate percpu area of @size bytes aligned at @align. If @gfp doesn't
- * contain %GFP_KERNEL, the allocation is atomic.
+ * contain %GFP_KERNEL, the allocation is atomic. If @gfp has __GFP_NOWARN
+ * then no warning will be triggered on invalid or failed allocation
+ * requests.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
@@ -1337,10 +1339,11 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
gfp_t gfp)
{
+ bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
+ bool do_warn = !(gfp & __GFP_NOWARN);
static int warn_limit = 10;
struct pcpu_chunk *chunk;
const char *err;
- bool is_atomic = (gfp & GFP_KERNEL) != GFP_KERNEL;
int slot, off, cpu, ret;
unsigned long flags;
void __percpu *ptr;
@@ -1361,7 +1364,7 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved,
if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE ||
!is_power_of_2(align))) {
- WARN(true, "illegal size (%zu) or align (%zu) for percpu allocation\n",
+ WARN(do_warn, "illegal size (%zu) or align (%zu) for percpu allocation\n",
size, align);
return NULL;
}
@@ -1482,7 +1485,7 @@ fail_unlock:
fail:
trace_percpu_alloc_percpu_fail(reserved, is_atomic, size, align);
- if (!is_atomic && warn_limit) {
+ if (!is_atomic && do_warn && warn_limit) {
pr_warn("allocation failed, size=%zu align=%zu atomic=%d, %s\n",
size, align, is_atomic, err);
dump_stack();
@@ -1507,7 +1510,9 @@ fail:
*
* Allocate zero-filled percpu area of @size bytes aligned at @align. If
* @gfp doesn't contain %GFP_KERNEL, the allocation doesn't block and can
- * be called from any context but is a lot more likely to fail.
+ * be called from any context but is a lot more likely to fail. If @gfp
+ * has __GFP_NOWARN then no warning will be triggered on invalid or failed
+ * allocation requests.
*
* RETURNS:
* Percpu pointer to the allocated area on success, NULL on failure.
@@ -1851,7 +1856,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
- ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
+ ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), PAGE_SIZE);
if (!ptr)
return NULL;
ai = ptr;
@@ -2714,6 +2719,7 @@ void __init setup_per_cpu_areas(void)
if (pcpu_setup_first_chunk(ai, fc) < 0)
panic("Failed to initialize percpu areas.");
+ pcpu_free_alloc_info(ai);
}
#endif /* CONFIG_SMP */
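Illustrative aside, not part of the commit: an atomic per-cpu allocation that opts out of the failure warning via __GFP_NOWARN, as the updated pcpu_alloc() documentation describes; struct my_stats and the helpers are hypothetical.
#include <linux/gfp.h>
#include <linux/percpu.h>
#include <linux/types.h>
struct my_stats {
	u64 packets;
};
static struct my_stats __percpu *my_alloc_stats_atomic(void)
{
	/* No GFP_KERNEL, so the allocation is atomic; failures stay silent. */
	return alloc_percpu_gfp(struct my_stats, GFP_NOWAIT | __GFP_NOWARN);
}
static void my_count_packet(struct my_stats __percpu *stats)
{
	this_cpu_inc(stats->packets);
}
static void my_free_stats(struct my_stats __percpu *stats)
{
	free_percpu(stats);
}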
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 1175f6a24fdb..1e4ee763c190 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* mm/pgtable-generic.c
*
diff --git a/mm/quicklist.c b/mm/quicklist.c
index daf6ff6e199a..5e98ac78e410 100644
--- a/mm/quicklist.c
+++ b/mm/quicklist.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Quicklist support.
*
diff --git a/mm/slab.c b/mm/slab.c
index 04dec48c3ed7..b7095884fd93 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/slab.c
* Written by Mark Hemment, 1996/97.
diff --git a/mm/slab.h b/mm/slab.h
index 073362816acc..86d7c7d860f9 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
@@ -258,7 +259,7 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
* memcg_caches issues a write barrier to match this (see
* memcg_create_kmem_cache()).
*/
- cachep = lockless_dereference(arr->entries[idx]);
+ cachep = READ_ONCE(arr->entries[idx]);
rcu_read_unlock();
return cachep;
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 80164599ca5d..0d7fe71ff5e4 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Slab allocator functions that are independent of the allocator strategy
*
diff --git a/mm/slob.c b/mm/slob.c
index a8bd6fa11a66..10249160b693 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SLOB Allocator: Simple List Of Blocks
*
diff --git a/mm/slub.c b/mm/slub.c
index 163352c537ab..1efbb8123037 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* SLUB: A slab allocator that limits cache line use instead of queuing
* objects in per cpu and per node lists.
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d1a39b8051e0..478ce6d4a2c4 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Virtual Memory Map support
*
diff --git a/mm/sparse.c b/mm/sparse.c
index 83b3bf6461af..60805abf98af 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* sparse memory mappings.
*/
@@ -22,8 +23,7 @@
* 1) mem_section - memory sections, mem_map's for valid memory
*/
#ifdef CONFIG_SPARSEMEM_EXTREME
-struct mem_section *mem_section[NR_SECTION_ROOTS]
- ____cacheline_internodealigned_in_smp;
+struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
____cacheline_internodealigned_in_smp;
@@ -100,7 +100,7 @@ static inline int sparse_index_init(unsigned long section_nr, int nid)
int __section_nr(struct mem_section* ms)
{
unsigned long root_nr;
- struct mem_section* root;
+ struct mem_section *root = NULL;
for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
@@ -111,7 +111,7 @@ int __section_nr(struct mem_section* ms)
break;
}
- VM_BUG_ON(root_nr == NR_SECTION_ROOTS);
+ VM_BUG_ON(!root);
return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
@@ -207,6 +207,16 @@ void __init memory_present(int nid, unsigned long start, unsigned long end)
{
unsigned long pfn;
+#ifdef CONFIG_SPARSEMEM_EXTREME
+ if (unlikely(!mem_section)) {
+ unsigned long size, align;
+
+ size = sizeof(struct mem_section) * NR_SECTION_ROOTS;
+ align = 1 << (INTERNODE_CACHE_SHIFT);
+ mem_section = memblock_virt_alloc(size, align);
+ }
+#endif
+
start &= PAGE_SECTION_MASK;
mminit_validate_memmodel_limits(&start, &end);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
@@ -329,11 +339,17 @@ again:
static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
unsigned long usemap_snr, pgdat_snr;
- static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
- static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
+ static unsigned long old_usemap_snr;
+ static unsigned long old_pgdat_snr;
struct pglist_data *pgdat = NODE_DATA(nid);
int usemap_nid;
+ /* First call */
+ if (!old_usemap_snr) {
+ old_usemap_snr = NR_MEM_SECTIONS;
+ old_pgdat_snr = NR_MEM_SECTIONS;
+ }
+
usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
if (usemap_snr == pgdat_snr)
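Illustrative aside, not part of the commit: the lazy mem_section allocation above uses memblock_virt_alloc(), which panics on failure during early boot. A minimal sketch of the same pattern for a hypothetical boot-time table my_boot_table.
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/init.h>
static unsigned long *my_boot_table __ro_after_init;
void __init my_boot_table_init(unsigned long nr_entries)
{
	unsigned long size = nr_entries * sizeof(*my_boot_table);
	/* memblock_virt_alloc() panics on failure, so no NULL check needed. */
	my_boot_table = memblock_virt_alloc(size, 1 << INTERNODE_CACHE_SHIFT);
}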
diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c
index fcd2740f4ed7..45affaef3bc6 100644
--- a/mm/swap_cgroup.c
+++ b/mm/swap_cgroup.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
#include <linux/swap_cgroup.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
diff --git a/mm/swap_slots.c b/mm/swap_slots.c
index 13a174006b91..d81cfc5a43d5 100644
--- a/mm/swap_slots.c
+++ b/mm/swap_slots.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Manage cache of swap slots to be used for and returned from
* swap.
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 05b6803f0cce..326439428daf 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/swap_state.c
*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index bf91dc9e7a79..e47a21e64764 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2869,6 +2869,7 @@ static struct swap_info_struct *alloc_swap_info(void)
p->flags = SWP_USED;
spin_unlock(&swap_lock);
spin_lock_init(&p->lock);
+ spin_lock_init(&p->cont_lock);
return p;
}
@@ -3545,6 +3546,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
head = vmalloc_to_page(si->swap_map + offset);
offset &= ~PAGE_MASK;
+ spin_lock(&si->cont_lock);
/*
* Page allocation does not initialize the page's lru field,
* but it does always reset its private field.
@@ -3564,7 +3566,7 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
* a continuation page, free our allocation and use this one.
*/
if (!(count & COUNT_CONTINUED))
- goto out;
+ goto out_unlock_cont;
map = kmap_atomic(list_page) + offset;
count = *map;
@@ -3575,11 +3577,13 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
* free our allocation and use this one.
*/
if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
- goto out;
+ goto out_unlock_cont;
}
list_add_tail(&page->lru, &head->lru);
page = NULL; /* now it's attached, don't free it */
+out_unlock_cont:
+ spin_unlock(&si->cont_lock);
out:
unlock_cluster(ci);
spin_unlock(&si->lock);
@@ -3604,6 +3608,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
struct page *head;
struct page *page;
unsigned char *map;
+ bool ret;
head = vmalloc_to_page(si->swap_map + offset);
if (page_private(head) != SWP_CONTINUED) {
@@ -3611,6 +3616,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
return false; /* need to add count continuation */
}
+ spin_lock(&si->cont_lock);
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
map = kmap_atomic(page) + offset;
@@ -3631,8 +3637,10 @@ static bool swap_count_continued(struct swap_info_struct *si,
if (*map == SWAP_CONT_MAX) {
kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
- if (page == head)
- return false; /* add count continuation */
+ if (page == head) {
+ ret = false; /* add count continuation */
+ goto out;
+ }
map = kmap_atomic(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
@@ -3645,7 +3653,7 @@ init_map: *map = 0; /* we didn't zero the page */
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
- return true; /* incremented */
+ ret = true; /* incremented */
} else { /* decrementing */
/*
@@ -3671,8 +3679,11 @@ init_map: *map = 0; /* we didn't zero the page */
kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
- return count == COUNT_CONTINUED;
+ ret = count == COUNT_CONTINUED;
}
+out:
+ spin_unlock(&si->cont_lock);
+ return ret;
}
/*
diff --git a/mm/vmacache.c b/mm/vmacache.c
index 7ffa0ee341b5..db7596eb6132 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2014 Davidlohr Bueso.
*/
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 13d711dd8776..15b483ef6440 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* linux/mm/vmscan.c
*
@@ -1867,7 +1868,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
* also allow kswapd to start writing pages during reclaim.
*/
if (stat.nr_unqueued_dirty == nr_taken) {
- wakeup_flusher_threads(0, WB_REASON_VMSCAN);
+ wakeup_flusher_threads(WB_REASON_VMSCAN);
set_bit(PGDAT_DIRTY, &pgdat->flags);
}
diff --git a/mm/workingset.c b/mm/workingset.c
index 7119cd745ace..b997c9de28f6 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* Workingset detection
*