author     Ingo Molnar <mingo@kernel.org>  2018-10-04 09:23:03 +0300
committer  Ingo Molnar <mingo@kernel.org>  2018-10-04 09:23:03 +0300
commit     c0554d2d3db438623b4f2f9abc3d766b2b15d2fb (patch)
tree       d00dc324772b95373f4bdc566b3437e5bf637157 /mm
parent     bef459026b161fbc39d20dcba698ed0cfffbac38 (diff)
parent     1d2ba7fee28b3a47cca8e8f4f94a22d30b2b3a6f (diff)
download   linux-c0554d2d3db438623b4f2f9abc3d766b2b15d2fb.tar.xz
Merge branch 'linus' into x86/core, to pick up fixes
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/Kconfig          |   1
-rw-r--r--  mm/Makefile         |   3
-rw-r--r--  mm/backing-dev.c    |   5
-rw-r--r--  mm/debug.c          |   4
-rw-r--r--  mm/fadvise.c        |  81
-rw-r--r--  mm/huge_memory.c    |   4
-rw-r--r--  mm/kmemleak.c       |   9
-rw-r--r--  mm/memcontrol.c     |   2
-rw-r--r--  mm/memory_hotplug.c |   3
-rw-r--r--  mm/oom_kill.c       |  14
-rw-r--r--  mm/page_alloc.c     |   4
-rw-r--r--  mm/readahead.c      |  45
-rw-r--r--  mm/shmem.c          |   2
-rw-r--r--  mm/util.c           |  11
-rw-r--r--  mm/vmacache.c       |  38
-rw-r--r--  mm/vmscan.c         |  11
16 files changed, 118 insertions, 119 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index a550635ea5c3..de64ea658716 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -637,6 +637,7 @@ config DEFERRED_STRUCT_PAGE_INIT
depends on NO_BOOTMEM
depends on SPARSEMEM
depends on !NEED_PER_CPU_KM
+ depends on 64BIT
help
Ordinarily all struct pages are initialised during early boot in a
single thread. On very large machines this can take a considerable
diff --git a/mm/Makefile b/mm/Makefile
index 8716bdabe1e6..26ef77a3883b 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -32,7 +32,7 @@ ifdef CONFIG_CROSS_MEMORY_ATTACH
mmu-$(CONFIG_MMU) += process_vm_access.o
endif
-obj-y := filemap.o mempool.o oom_kill.o \
+obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
maccess.o page_alloc.o page-writeback.o \
readahead.o swap.o truncate.o vmscan.o shmem.o \
util.o mmzone.o vmstat.o backing-dev.o \
@@ -49,7 +49,6 @@ else
obj-y += bootmem.o
endif
-obj-$(CONFIG_ADVISE_SYSCALLS) += fadvise.o
ifdef CONFIG_MMU
obj-$(CONFIG_ADVISE_SYSCALLS) += madvise.o
endif
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index f5981e9d6ae2..8a8bb8796c6c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -491,6 +491,7 @@ static void cgwb_release_workfn(struct work_struct *work)
{
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
release_work);
+ struct blkcg *blkcg = css_to_blkcg(wb->blkcg_css);
mutex_lock(&wb->bdi->cgwb_release_mutex);
wb_shutdown(wb);
@@ -499,6 +500,9 @@ static void cgwb_release_workfn(struct work_struct *work)
css_put(wb->blkcg_css);
mutex_unlock(&wb->bdi->cgwb_release_mutex);
+ /* triggers blkg destruction if cgwb_refcnt becomes zero */
+ blkcg_cgwb_put(blkcg);
+
fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt);
wb_exit(wb);
@@ -597,6 +601,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
list_add_tail_rcu(&wb->bdi_node, &bdi->wb_list);
list_add(&wb->memcg_node, memcg_cgwb_list);
list_add(&wb->blkcg_node, blkcg_cgwb_list);
+ blkcg_cgwb_get(blkcg);
css_get(memcg_css);
css_get(blkcg_css);
}
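
The two backing-dev.c hunks pair up: cgwb_create() now takes a reference on the blkcg for every writeback structure it attaches, and cgwb_release_workfn() drops that reference only after writeback has shut down, so blkg destruction is deferred until no cgwb can still dereference the blkcg. The helpers live in the blk-cgroup headers rather than in this diff; a rough sketch of their expected shape (an assumption derived from the comment above, not quoted code):

    static inline void blkcg_cgwb_get(struct blkcg *blkcg)
    {
        refcount_inc(&blkcg->cgwb_refcnt);
    }

    static inline void blkcg_cgwb_put(struct blkcg *blkcg)
    {
        /* last writeback structure gone: blkgs can be torn down */
        if (refcount_dec_and_test(&blkcg->cgwb_refcnt))
            blkcg_destroy_blkgs(blkcg);
    }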
diff --git a/mm/debug.c b/mm/debug.c
index 38c926520c97..bd10aad8539a 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(dump_vma);
void dump_mm(const struct mm_struct *mm)
{
- pr_emerg("mm %px mmap %px seqnum %d task_size %lu\n"
+ pr_emerg("mm %px mmap %px seqnum %llu task_size %lu\n"
#ifdef CONFIG_MMU
"get_unmapped_area %px\n"
#endif
@@ -142,7 +142,7 @@ void dump_mm(const struct mm_struct *mm)
"tlb_flush_pending %d\n"
"def_flags: %#lx(%pGv)\n",
- mm, mm->mmap, mm->vmacache_seqnum, mm->task_size,
+ mm, mm->mmap, (long long) mm->vmacache_seqnum, mm->task_size,
#ifdef CONFIG_MMU
mm->get_unmapped_area,
#endif
diff --git a/mm/fadvise.c b/mm/fadvise.c
index 2d8376e3c640..467bcd032037 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -27,9 +27,9 @@
* deactivate the pages and clear PG_Referenced.
*/
-int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+static int generic_fadvise(struct file *file, loff_t offset, loff_t len,
+ int advice)
{
- struct fd f = fdget(fd);
struct inode *inode;
struct address_space *mapping;
struct backing_dev_info *bdi;
@@ -37,22 +37,14 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
pgoff_t start_index;
pgoff_t end_index;
unsigned long nrpages;
- int ret = 0;
-
- if (!f.file)
- return -EBADF;
- inode = file_inode(f.file);
- if (S_ISFIFO(inode->i_mode)) {
- ret = -ESPIPE;
- goto out;
- }
+ inode = file_inode(file);
+ if (S_ISFIFO(inode->i_mode))
+ return -ESPIPE;
- mapping = f.file->f_mapping;
- if (!mapping || len < 0) {
- ret = -EINVAL;
- goto out;
- }
+ mapping = file->f_mapping;
+ if (!mapping || len < 0)
+ return -EINVAL;
bdi = inode_to_bdi(mapping->host);
@@ -67,9 +59,9 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
/* no bad return value, but ignore advice */
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
- goto out;
+ return 0;
}
/*
@@ -85,21 +77,21 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
switch (advice) {
case POSIX_FADV_NORMAL:
- f.file->f_ra.ra_pages = bdi->ra_pages;
- spin_lock(&f.file->f_lock);
- f.file->f_mode &= ~FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ file->f_ra.ra_pages = bdi->ra_pages;
+ spin_lock(&file->f_lock);
+ file->f_mode &= ~FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_RANDOM:
- spin_lock(&f.file->f_lock);
- f.file->f_mode |= FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ spin_lock(&file->f_lock);
+ file->f_mode |= FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_SEQUENTIAL:
- f.file->f_ra.ra_pages = bdi->ra_pages * 2;
- spin_lock(&f.file->f_lock);
- f.file->f_mode &= ~FMODE_RANDOM;
- spin_unlock(&f.file->f_lock);
+ file->f_ra.ra_pages = bdi->ra_pages * 2;
+ spin_lock(&file->f_lock);
+ file->f_mode &= ~FMODE_RANDOM;
+ spin_unlock(&file->f_lock);
break;
case POSIX_FADV_WILLNEED:
/* First and last PARTIAL page! */
@@ -115,8 +107,7 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
* Ignore return value because fadvise() shall return
* success even if filesystem can't retrieve a hint,
*/
- force_page_cache_readahead(mapping, f.file, start_index,
- nrpages);
+ force_page_cache_readahead(mapping, file, start_index, nrpages);
break;
case POSIX_FADV_NOREUSE:
break;
@@ -183,9 +174,32 @@ int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
}
break;
default:
- ret = -EINVAL;
+ return -EINVAL;
}
-out:
+ return 0;
+}
+
+int vfs_fadvise(struct file *file, loff_t offset, loff_t len, int advice)
+{
+ if (file->f_op->fadvise)
+ return file->f_op->fadvise(file, offset, len, advice);
+
+ return generic_fadvise(file, offset, len, advice);
+}
+EXPORT_SYMBOL(vfs_fadvise);
+
+#ifdef CONFIG_ADVISE_SYSCALLS
+
+int ksys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+{
+ struct fd f = fdget(fd);
+ int ret;
+
+ if (!f.file)
+ return -EBADF;
+
+ ret = vfs_fadvise(f.file, offset, len, advice);
+
fdput(f);
return ret;
}
@@ -203,3 +217,4 @@ SYSCALL_DEFINE4(fadvise64, int, fd, loff_t, offset, size_t, len, int, advice)
}
#endif
+#endif
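
The net effect of the fadvise.c rework is a three-layer split: generic_fadvise() keeps the old advice logic, the new exported vfs_fadvise() prefers a filesystem's ->fadvise hook when one is set, and ksys_fadvise64_64() shrinks to fd lookup plus a vfs_fadvise() call, with only the syscall plumbing left under CONFIG_ADVISE_SYSCALLS. A stacking filesystem could now intercept advice and forward it to its backing file; a hypothetical sketch (the myfs_* names and the private_data layout are illustrative, only the hook signature is fixed by the dispatch above):

    /* hypothetical stacking-fs hook: forward advice to the real file */
    static int myfs_fadvise(struct file *file, loff_t offset, loff_t len,
                            int advice)
    {
        struct myfs_file *mf = file->private_data;  /* illustrative */

        return vfs_fadvise(mf->realfile, offset, len, advice);
    }

    const struct file_operations myfs_file_operations = {
        /* ... */
        .fadvise = myfs_fadvise,
    };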
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c3bc7e9c9a2a..533f9b00147d 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -821,11 +821,11 @@ vm_fault_t vmf_insert_pfn_pud(struct vm_area_struct *vma, unsigned long addr,
* but we need to be consistent with PTEs and architectures that
* can't support a 'special' bit.
*/
- BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)));
+ BUG_ON(!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) &&
+ !pfn_t_devmap(pfn));
BUG_ON((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) ==
(VM_PFNMAP|VM_MIXEDMAP));
BUG_ON((vma->vm_flags & VM_PFNMAP) && is_cow_mapping(vma->vm_flags));
- BUG_ON(!pfn_t_devmap(pfn));
if (addr < vma->vm_start || addr >= vma->vm_end)
return VM_FAULT_SIGBUS;
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 9a085d525bbc..17dd883198ae 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -2097,6 +2097,11 @@ static int __init kmemleak_late_init(void)
kmemleak_initialized = 1;
+ dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
+ &kmemleak_fops);
+ if (!dentry)
+ pr_warn("Failed to create the debugfs kmemleak file\n");
+
if (kmemleak_error) {
/*
* Some error occurred and kmemleak was disabled. There is a
@@ -2108,10 +2113,6 @@ static int __init kmemleak_late_init(void)
return -ENOMEM;
}
- dentry = debugfs_create_file("kmemleak", 0644, NULL, NULL,
- &kmemleak_fops);
- if (!dentry)
- pr_warn("Failed to create the debugfs kmemleak file\n");
mutex_lock(&scan_mutex);
start_scan_thread();
mutex_unlock(&scan_mutex);
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4ead5a4817de..e79cb59552d9 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1701,8 +1701,6 @@ static enum oom_status mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int
if (mem_cgroup_out_of_memory(memcg, mask, order))
return OOM_SUCCESS;
- WARN(1,"Memory cgroup charge failed because of no reclaimable memory! "
- "This looks like a misconfiguration or a kernel bug.");
return OOM_FAILED;
}
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 9eea6e809a4e..38d94b703e9d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1333,7 +1333,8 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
if (__PageMovable(page))
return pfn;
if (PageHuge(page)) {
- if (page_huge_active(page))
+ if (hugepage_migration_supported(page_hstate(page)) &&
+ page_huge_active(page))
return pfn;
else
pfn = round_up(pfn + 1,
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b5b25e4dcbbb..f10aa5360616 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -522,6 +522,7 @@ bool __oom_reap_task_mm(struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, start, end);
if (mmu_notifier_invalidate_range_start_nonblock(mm, start, end)) {
+ tlb_finish_mmu(&tlb, start, end);
ret = false;
continue;
}
@@ -1103,10 +1104,17 @@ bool out_of_memory(struct oom_control *oc)
}
select_bad_process(oc);
- /* Found nothing?!?! Either we hang forever, or we panic. */
- if (!oc->chosen && !is_sysrq_oom(oc) && !is_memcg_oom(oc)) {
+ /* Found nothing?!?! */
+ if (!oc->chosen) {
dump_header(oc, NULL);
- panic("Out of memory and no killable processes...\n");
+ pr_warn("Out of memory and no killable processes...\n");
+ /*
+ * If we got here due to an actual allocation at the
+ * system level, we cannot survive this and will enter
+ * an endless loop in the allocator. Bail out now.
+ */
+ if (!is_sysrq_oom(oc) && !is_memcg_oom(oc))
+ panic("System is deadlocked on memory\n");
}
if (oc->chosen && oc->chosen != (void *)-1UL)
oom_kill_process(oc, !is_memcg_oom(oc) ? "Out of memory" :
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 05e983f42316..89d2a2ab3fe6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7708,6 +7708,10 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
* handle each tail page individually in migration.
*/
if (PageHuge(page)) {
+
+ if (!hugepage_migration_supported(page_hstate(page)))
+ goto unmovable;
+
iter = round_up(iter + 1, 1<<compound_order(page)) - 1;
continue;
}
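
This hunk and the memory_hotplug.c change above apply the same rule: a huge page whose size the architecture cannot migrate must not be treated as a movable or offlinable candidate. For orientation, hugepage_migration_supported() is defined in the hugetlb headers, outside this diff; around this release it amounts to roughly the following (an approximation, the supported sizes are arch-configured):

    static inline bool hugepage_migration_supported(struct hstate *h)
    {
    #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION
        /* only PMD- and PGD-sized huge pages are migratable */
        return huge_page_shift(h) == PMD_SHIFT ||
               huge_page_shift(h) == PGDIR_SHIFT;
    #else
        return false;
    #endif
    }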
diff --git a/mm/readahead.c b/mm/readahead.c
index a59ea70527b9..4e630143a0ba 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -20,6 +20,7 @@
#include <linux/file.h>
#include <linux/mm_inline.h>
#include <linux/blk-cgroup.h>
+#include <linux/fadvise.h>
#include "internal.h"
@@ -575,24 +576,6 @@ page_cache_async_readahead(struct address_space *mapping,
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
-static ssize_t
-do_readahead(struct address_space *mapping, struct file *filp,
- pgoff_t index, unsigned long nr)
-{
- if (!mapping || !mapping->a_ops)
- return -EINVAL;
-
- /*
- * Readahead doesn't make sense for DAX inodes, but we don't want it
- * to report a failure either. Instead, we just return success and
- * don't do any work.
- */
- if (dax_mapping(mapping))
- return 0;
-
- return force_page_cache_readahead(mapping, filp, index, nr);
-}
-
ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
{
ssize_t ret;
@@ -600,16 +583,22 @@ ssize_t ksys_readahead(int fd, loff_t offset, size_t count)
ret = -EBADF;
f = fdget(fd);
- if (f.file) {
- if (f.file->f_mode & FMODE_READ) {
- struct address_space *mapping = f.file->f_mapping;
- pgoff_t start = offset >> PAGE_SHIFT;
- pgoff_t end = (offset + count - 1) >> PAGE_SHIFT;
- unsigned long len = end - start + 1;
- ret = do_readahead(mapping, f.file, start, len);
- }
- fdput(f);
- }
+ if (!f.file || !(f.file->f_mode & FMODE_READ))
+ goto out;
+
+ /*
+ * The readahead() syscall is intended to run only on files
+ * that can execute readahead. If readahead is not possible
+ * on this file, then we must return -EINVAL.
+ */
+ ret = -EINVAL;
+ if (!f.file->f_mapping || !f.file->f_mapping->a_ops ||
+ !S_ISREG(file_inode(f.file)->i_mode))
+ goto out;
+
+ ret = vfs_fadvise(f.file, offset, count, POSIX_FADV_WILLNEED);
+out:
+ fdput(f);
return ret;
}
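
readahead(2) thus becomes a thin wrapper over the same vfs_fadvise() path: after checking that the target is a regular, readable file with a usable mapping, it issues POSIX_FADV_WILLNEED, so filesystems with an ->fadvise hook see readahead requests too. From userspace the two interfaces now converge; a minimal illustration, not from this patch:

    #define _GNU_SOURCE         /* readahead() is Linux-specific */
    #include <fcntl.h>

    /* Ask the kernel to populate the page cache for the first 1 MiB. */
    static void prefetch_head(int fd)
    {
        readahead(fd, 0, 1 << 20);
        /* equivalent after this patch:
         * posix_fadvise(fd, 0, 1 << 20, POSIX_FADV_WILLNEED);
         */
    }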
diff --git a/mm/shmem.c b/mm/shmem.c
index 0376c124b043..446942677cd4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2227,6 +2227,8 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
diff --git a/mm/util.c b/mm/util.c
index d2890a407332..9e3ebd2ef65f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -435,11 +435,14 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
EXPORT_SYMBOL(kvmalloc_node);
/**
- * kvfree - free memory allocated with kvmalloc
- * @addr: pointer returned by kvmalloc
+ * kvfree() - Free memory.
+ * @addr: Pointer to allocated memory.
*
- * If the memory is allocated from vmalloc area it is freed with vfree().
- * Otherwise kfree() is used.
+ * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
+ * It is slightly more efficient to use kfree() or vfree() if you are certain
+ * that you know which one to use.
+ *
+ * Context: Any context except NMI.
*/
void kvfree(const void *addr)
{
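
The reworked kernel-doc spells out the contract: kvfree() is the universal free for kmalloc()-, vmalloc()- and kvmalloc()-backed pointers, at the price of a runtime check that a direct kfree() or vfree() avoids. The usual pairing, as a quick sketch:

    /* possibly-large table: kvmalloc_array falls back to vmalloc */
    struct entry *tbl = kvmalloc_array(nr, sizeof(*tbl), GFP_KERNEL);
    if (!tbl)
        return -ENOMEM;
    /* ... use tbl ... */
    kvfree(tbl);    /* correct whether kmalloc or vmalloc backed it */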
diff --git a/mm/vmacache.c b/mm/vmacache.c
index ea517bef7dc5..cdc32a3b02fa 100644
--- a/mm/vmacache.c
+++ b/mm/vmacache.c
@@ -20,44 +20,6 @@
#define VMACACHE_HASH(addr) ((addr >> VMACACHE_SHIFT) & VMACACHE_MASK)
/*
- * Flush vma caches for threads that share a given mm.
- *
- * The operation is safe because the caller holds the mmap_sem
- * exclusively and other threads accessing the vma cache will
- * have mmap_sem held at least for read, so no extra locking
- * is required to maintain the vma cache.
- */
-void vmacache_flush_all(struct mm_struct *mm)
-{
- struct task_struct *g, *p;
-
- count_vm_vmacache_event(VMACACHE_FULL_FLUSHES);
-
- /*
- * Single threaded tasks need not iterate the entire
- * list of process. We can avoid the flushing as well
- * since the mm's seqnum was increased and don't have
- * to worry about other threads' seqnum. Current's
- * flush will occur upon the next lookup.
- */
- if (atomic_read(&mm->mm_users) == 1)
- return;
-
- rcu_read_lock();
- for_each_process_thread(g, p) {
- /*
- * Only flush the vmacache pointers as the
- * mm seqnum is already set and curr's will
- * be set upon invalidation when the next
- * lookup is done.
- */
- if (mm == p->mm)
- vmacache_flush(p);
- }
- rcu_read_unlock();
-}
-
-/*
* This task may be accessing a foreign mm via (for example)
* get_user_pages()->find_vma(). The vmacache is task-local and this
* task's vmacache pertains to a different mm (ie, its own). There is
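
Context for the removal: the companion change in this merge widens mm->vmacache_seqnum to 64 bits (hence the %d to %llu switch in dump_mm() above), so the counter can no longer realistically wrap and the flush-every-thread overflow fallback becomes dead code. What remains is the lazy per-task scheme, roughly as follows (a simplified sketch, not lines from this diff):

    /* invalidation is just a counter bump on the mm */
    static inline void vmacache_invalidate(struct mm_struct *mm)
    {
        mm->vmacache_seqnum++;  /* 64-bit: wraparound is not a concern */
    }

    /* lookups flush a task's cache lazily when its seqnum is stale */
    if (mm->vmacache_seqnum != curr->vmacache.seqnum) {
        curr->vmacache.seqnum = mm->vmacache_seqnum;
        vmacache_flush(curr);
    }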
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7e7d25504651..c7ce2c161225 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -476,6 +476,17 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
delta = freeable >> priority;
delta *= 4;
do_div(delta, shrinker->seeks);
+
+ /*
+ * Make sure we apply some minimal pressure on default priority
+ * even on small cgroups. Stale objects are not only consuming memory
+ * by themselves, but can also hold a reference to a dying cgroup,
+ * preventing it from being reclaimed. A dying cgroup with all
+ * corresponding structures like per-cpu stats and kmem caches
+ * can be really big, so it may lead to a significant waste of memory.
+ */
+ delta = max_t(unsigned long long, delta, min(freeable, batch_size));
+
total_scan += delta;
if (total_scan < 0) {
pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n",