Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c    23
-rw-r--r--  mm/bounce.c          4
-rw-r--r--  mm/filemap.c         8
-rw-r--r--  mm/huge_memory.c     6
-rw-r--r--  mm/hugetlb.c         2
-rw-r--r--  mm/ksm.c            23
-rw-r--r--  mm/memblock.c        6
-rw-r--r--  mm/memcontrol.c    208
-rw-r--r--  mm/memory.c          4
-rw-r--r--  mm/mempolicy.c       3
-rw-r--r--  mm/migrate.c         2
-rw-r--r--  mm/mlock.c           3
-rw-r--r--  mm/mmap.c           17
-rw-r--r--  mm/mprotect.c        3
-rw-r--r--  mm/nommu.c           9
-rw-r--r--  mm/page_alloc.c      1
-rw-r--r--  mm/page_cgroup.c     4
-rw-r--r--  mm/percpu-vm.c       3
-rw-r--r--  mm/shmem.c           4
-rw-r--r--  mm/swap.c            8
-rw-r--r--  mm/swap_state.c     10
-rw-r--r--  mm/swapfile.c       30
-rw-r--r--  mm/truncate.c        2
-rw-r--r--  mm/vmalloc.c         8
24 files changed, 218 insertions, 173 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7ba8feae11b8..dd8e2aafb07e 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -318,7 +318,7 @@ static void wakeup_timer_fn(unsigned long data)
if (bdi->wb.task) {
trace_writeback_wake_thread(bdi);
wake_up_process(bdi->wb.task);
- } else {
+ } else if (bdi->dev) {
/*
* When bdi tasks are inactive for long time, they are killed.
* In this case we have to wake-up the forker thread which
@@ -584,6 +584,8 @@ EXPORT_SYMBOL(bdi_register_dev);
*/
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
+ struct task_struct *task;
+
if (!bdi_cap_writeback_dirty(bdi))
return;
@@ -602,8 +604,13 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
* Finally, kill the kernel thread. We don't need to be RCU
* safe anymore, since the bdi is gone from visibility.
*/
- if (bdi->wb.task)
- kthread_stop(bdi->wb.task);
+ spin_lock_bh(&bdi->wb_lock);
+ task = bdi->wb.task;
+ bdi->wb.task = NULL;
+ spin_unlock_bh(&bdi->wb_lock);
+
+ if (task)
+ kthread_stop(task);
}
/*
@@ -623,7 +630,9 @@ static void bdi_prune_sb(struct backing_dev_info *bdi)
void bdi_unregister(struct backing_dev_info *bdi)
{
- if (bdi->dev) {
+ struct device *dev = bdi->dev;
+
+ if (dev) {
bdi_set_min_ratio(bdi, 0);
trace_writeback_bdi_unregister(bdi);
bdi_prune_sb(bdi);
@@ -632,8 +641,12 @@ void bdi_unregister(struct backing_dev_info *bdi)
if (!bdi_cap_flush_forker(bdi))
bdi_wb_shutdown(bdi);
bdi_debug_unregister(bdi);
- device_unregister(bdi->dev);
+
+ spin_lock_bh(&bdi->wb_lock);
bdi->dev = NULL;
+ spin_unlock_bh(&bdi->wb_lock);
+
+ device_unregister(dev);
}
}
EXPORT_SYMBOL(bdi_unregister);
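
The backing-dev.c hunks above apply one teardown idiom twice: clear the shared pointer under the lock the readers take (wakeup_timer_fn() checks these fields under the same wb_lock in this kernel), then act on a saved copy after the lock is dropped, because kthread_stop() and device_unregister() can sleep. A minimal sketch of that idiom under those assumptions, using a hypothetical struct obj rather than the real bdi fields:

    #include <linux/spinlock.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    struct obj {
            spinlock_t lock;                /* protects ->worker */
            struct task_struct *worker;
    };

    /* Teardown side: detach under the lock, stop the thread outside it. */
    static void obj_shutdown(struct obj *o)
    {
            struct task_struct *task;

            spin_lock_bh(&o->lock);
            task = o->worker;
            o->worker = NULL;               /* readers now see NULL */
            spin_unlock_bh(&o->lock);

            if (task)
                    kthread_stop(task);     /* may sleep, so done unlocked */
    }

    /* Reader side (e.g. a timer callback): check under the same lock. */
    static void obj_poke(struct obj *o)
    {
            spin_lock_bh(&o->lock);
            if (o->worker)
                    wake_up_process(o->worker);
            spin_unlock_bh(&o->lock);
    }
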
diff --git a/mm/bounce.c b/mm/bounce.c
index 4e9ae722af83..d1be02ca1889 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
unsigned char *vto;
local_irq_save(flags);
- vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+ vto = kmap_atomic(to->bv_page);
memcpy(vto + to->bv_offset, vfrom, to->bv_len);
- kunmap_atomic(vto, KM_BOUNCE_READ);
+ kunmap_atomic(vto);
local_irq_restore(flags);
}
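
The same kmap conversion recurs in the filemap.c, ksm.c, memory.c, shmem.c, swapfile.c and vmalloc.c hunks below: the KM_* slot argument is gone, atomic kmaps are stack-based, and the only ordering rule left is that nested mappings are undone in reverse (LIFO) order. A minimal sketch of the new calling convention, loosely modelled on the memcmp_pages() hunk in ksm.c; the function name here is illustrative, not part of the patch:

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Compare two pages with the stack-based atomic kmap API. */
    static int pages_equal(struct page *a, struct page *b)
    {
            void *va, *vb;
            int ret;

            va = kmap_atomic(a);
            vb = kmap_atomic(b);
            ret = memcmp(va, vb, PAGE_SIZE);
            kunmap_atomic(vb);      /* last mapped, first unmapped */
            kunmap_atomic(va);

            return ret == 0;
    }
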
diff --git a/mm/filemap.c b/mm/filemap.c
index b66275757c28..2f8165075a5a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
* taking the kmap.
*/
if (!fault_in_pages_writeable(desc->arg.buf, size)) {
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
left = __copy_to_user_inatomic(desc->arg.buf,
kaddr + offset, size);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
if (left == 0)
goto success;
}
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
size_t copied;
BUG_ON(!in_atomic());
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
if (likely(i->nr_segs == 1)) {
int left;
char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
copied = __iovec_copy_from_user_inatomic(kaddr + offset,
i->iov, i->iov_offset, bytes);
}
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
return copied;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 91d3efb25d15..8f7fc394f636 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
set_pmd_at(mm, haddr, pmd, entry);
prepare_pmd_huge_pte(pgtable, mm);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+ mm->nr_ptes++;
spin_unlock(&mm->page_table_lock);
}
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
prepare_pmd_huge_pte(pgtable, dst_mm);
+ dst_mm->nr_ptes++;
ret = 0;
out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
}
kfree(pages);
- mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
VM_BUG_ON(page_mapcount(page) < 0);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON(!PageHead(page));
+ tlb->mm->nr_ptes--;
spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
pte_unmap(pte);
}
- mm->nr_ptes++;
smp_wmb(); /* make pte visible before pmd */
/*
* Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
set_pmd_at(mm, address, pmd, _pmd);
update_mmu_cache(vma, address, _pmd);
prepare_pmd_huge_pte(pgtable, mm);
- mm->nr_ptes--;
spin_unlock(&mm->page_table_lock);
#ifndef CONFIG_NUMA
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f34bd8dda34..a876871f6be5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
set_page_dirty(page);
list_add(&page->lru, &page_list);
}
- spin_unlock(&mm->page_table_lock);
flush_tlb_range(vma, start, end);
+ spin_unlock(&mm->page_table_lock);
mmu_notifier_invalidate_range_end(mm, start, end);
list_for_each_entry_safe(page, tmp, &page_list, lru) {
page_remove_rmap(page);
diff --git a/mm/ksm.c b/mm/ksm.c
index 1925ffbfb27f..a6d3fb7e6c10 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
-#include <linux/memcontrol.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
@@ -673,9 +672,9 @@ error:
static u32 calc_checksum(struct page *page)
{
u32 checksum;
- void *addr = kmap_atomic(page, KM_USER0);
+ void *addr = kmap_atomic(page);
checksum = jhash2(addr, PAGE_SIZE / 4, 17);
- kunmap_atomic(addr, KM_USER0);
+ kunmap_atomic(addr);
return checksum;
}
@@ -684,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
char *addr1, *addr2;
int ret;
- addr1 = kmap_atomic(page1, KM_USER0);
- addr2 = kmap_atomic(page2, KM_USER1);
+ addr1 = kmap_atomic(page1);
+ addr2 = kmap_atomic(page2);
ret = memcmp(addr1, addr2, PAGE_SIZE);
- kunmap_atomic(addr2, KM_USER1);
- kunmap_atomic(addr1, KM_USER0);
+ kunmap_atomic(addr2);
+ kunmap_atomic(addr1);
return ret;
}
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
if (new_page) {
- /*
- * The memcg-specific accounting when moving
- * pages around the LRU lists relies on the
- * page's owner (memcg) to be valid. Usually,
- * pages are assigned to a new owner before
- * being put on the LRU list, but since this
- * is not the case here, the stale owner from
- * a previous allocation cycle must be reset.
- */
- mem_cgroup_reset_owner(new_page);
copy_user_highpage(new_page, page, address, vma);
SetPageDirty(new_page);
diff --git a/mm/memblock.c b/mm/memblock.c
index 77b5f227e1d8..99f285599501 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -99,9 +99,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
phys_addr_t this_start, this_end, cand;
u64 i;
- /* align @size to avoid excessive fragmentation on reserved array */
- size = round_up(size, align);
-
/* pump up @end */
if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
end = memblock.current_limit;
@@ -731,6 +728,9 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
{
phys_addr_t found;
+ /* align @size to avoid excessive fragmentation on reserved array */
+ size = round_up(size, align);
+
found = memblock_find_in_range_node(0, max_addr, size, align, nid);
if (found && !memblock_reserve(found, size))
return found;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6728a7ae6f2d..26c6f4ec20f4 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -230,10 +230,30 @@ struct mem_cgroup {
* the counter to account for memory usage
*/
struct res_counter res;
- /*
- * the counter to account for mem+swap usage.
- */
- struct res_counter memsw;
+
+ union {
+ /*
+ * the counter to account for mem+swap usage.
+ */
+ struct res_counter memsw;
+
+ /*
+ * rcu_freeing is used only when freeing struct mem_cgroup,
+ * so put it into a union to avoid wasting more memory.
+ * It must be disjoint from the css field. It could be
+ * in a union with the res field, but res plays a much
+ * larger part in mem_cgroup life than memsw, and might
+ * be of interest, even at time of free, when debugging.
+ * So share rcu_head with the less interesting memsw.
+ */
+ struct rcu_head rcu_freeing;
+ /*
+ * But when using vfree(), that cannot be done at
+ * interrupt time, so we must then queue the work.
+ */
+ struct work_struct work_freeing;
+ };
+
/*
* Per cgroup active and inactive list, similar to the
* per zone LRU lists.
@@ -1042,6 +1062,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
pc = lookup_page_cgroup(page);
memcg = pc->mem_cgroup;
+
+ /*
+ * Surreptitiously switch any uncharged page to root:
+ * an uncharged page off lru does nothing to secure
+ * its former mem_cgroup from sudden removal.
+ *
+ * Our caller holds lru_lock, and PageCgroupUsed is updated
+ * under page_cgroup lock: between them, they make all uses
+ * of pc->mem_cgroup safe.
+ */
+ if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+ pc->mem_cgroup = memcg = root_mem_cgroup;
+
mz = page_cgroup_zoneinfo(memcg, page);
/* compound_order() is stabilized through lru_lock */
MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2441,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
struct page *page,
unsigned int nr_pages,
struct page_cgroup *pc,
- enum charge_type ctype)
+ enum charge_type ctype,
+ bool lrucare)
{
+ struct zone *uninitialized_var(zone);
+ bool was_on_lru = false;
+
lock_page_cgroup(pc);
if (unlikely(PageCgroupUsed(pc))) {
unlock_page_cgroup(pc);
@@ -2420,6 +2457,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
* we don't need page_cgroup_lock about tail pages, becase they are not
* accessed by any other context at this point.
*/
+
+ /*
+ * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+ * may already be on some other mem_cgroup's LRU. Take care of it.
+ */
+ if (lrucare) {
+ zone = page_zone(page);
+ spin_lock_irq(&zone->lru_lock);
+ if (PageLRU(page)) {
+ ClearPageLRU(page);
+ del_page_from_lru_list(zone, page, page_lru(page));
+ was_on_lru = true;
+ }
+ }
+
pc->mem_cgroup = memcg;
/*
* We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2495,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
break;
}
+ if (lrucare) {
+ if (was_on_lru) {
+ VM_BUG_ON(PageLRU(page));
+ SetPageLRU(page);
+ add_page_to_lru_list(zone, page, page_lru(page));
+ }
+ spin_unlock_irq(&zone->lru_lock);
+ }
+
mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
unlock_page_cgroup(pc);
- WARN_ON_ONCE(PageLRU(page));
+
/*
* "charge_statistics" updated event counter. Then, check it.
* Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2704,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
if (ret == -ENOMEM)
return ret;
- __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+ __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
return 0;
}
@@ -2663,35 +2724,6 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
enum charge_type ctype);
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
- enum charge_type ctype)
-{
- struct page_cgroup *pc = lookup_page_cgroup(page);
- struct zone *zone = page_zone(page);
- unsigned long flags;
- bool removed = false;
-
- /*
- * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
- * is already on LRU. It means the page may on some other page_cgroup's
- * LRU. Take care of it.
- */
- spin_lock_irqsave(&zone->lru_lock, flags);
- if (PageLRU(page)) {
- del_page_from_lru_list(zone, page, page_lru(page));
- ClearPageLRU(page);
- removed = true;
- }
- __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
- if (removed) {
- add_page_to_lru_list(zone, page, page_lru(page));
- SetPageLRU(page);
- }
- spin_unlock_irqrestore(&zone->lru_lock, flags);
- return;
-}
-
int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask)
{
@@ -2769,13 +2801,16 @@ static void
__mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
enum charge_type ctype)
{
+ struct page_cgroup *pc;
+
if (mem_cgroup_disabled())
return;
if (!memcg)
return;
cgroup_exclude_rmdir(&memcg->css);
- __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+ pc = lookup_page_cgroup(page);
+ __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
/*
* Now swap is on-memory. This means this page may be
* counted both as mem and swap....double count.
@@ -3027,23 +3062,6 @@ void mem_cgroup_uncharge_end(void)
batch->memcg = NULL;
}
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
- struct page_cgroup *pc;
-
- if (mem_cgroup_disabled())
- return;
-
- pc = lookup_page_cgroup(newpage);
- VM_BUG_ON(PageCgroupUsed(pc));
- pc->mem_cgroup = root_mem_cgroup;
-}
-
#ifdef CONFIG_SWAP
/*
* called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3266,7 @@ int mem_cgroup_prepare_migration(struct page *page,
ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
else
ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
- __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
return ret;
}
@@ -3332,7 +3350,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
* the newpage may be on LRU(or pagevec for LRU) already. We lock
* LRU while we overwrite pc->mem_cgroup.
*/
- __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+ __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
}
#ifdef CONFIG_DEBUG_VM
@@ -4414,6 +4432,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
*/
BUG_ON(!thresholds);
+ if (!thresholds->primary)
+ goto unlock;
+
usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
/* Check if a threshold crossed before removing */
@@ -4462,7 +4483,7 @@ swap_buffers:
/* To be sure that nobody uses thresholds */
synchronize_rcu();
-
+unlock:
mutex_unlock(&memcg->thresholds_lock);
}
@@ -4581,10 +4602,9 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
return mem_cgroup_sockets_init(cont, ss);
};
-static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
+static void kmem_cgroup_destroy(struct cgroup *cont)
{
- mem_cgroup_sockets_destroy(cont, ss);
+ mem_cgroup_sockets_destroy(cont);
}
#else
static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
@@ -4592,8 +4612,7 @@ static int register_kmem_files(struct cgroup *cont, struct cgroup_subsys *ss)
return 0;
}
-static void kmem_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
+static void kmem_cgroup_destroy(struct cgroup *cont)
{
}
#endif
@@ -4779,6 +4798,27 @@ out_free:
}
/*
+ * Helpers for freeing a vzalloc()ed mem_cgroup by RCU,
+ * but in process context. The work_freeing structure is overlaid
+ * on the rcu_freeing structure, which itself is overlaid on memsw.
+ */
+static void vfree_work(struct work_struct *work)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = container_of(work, struct mem_cgroup, work_freeing);
+ vfree(memcg);
+}
+static void vfree_rcu(struct rcu_head *rcu_head)
+{
+ struct mem_cgroup *memcg;
+
+ memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
+ INIT_WORK(&memcg->work_freeing, vfree_work);
+ schedule_work(&memcg->work_freeing);
+}
+
+/*
* At destroying mem_cgroup, references from swap_cgroup can remain.
* (scanning all at force_empty is too costly...)
*
@@ -4801,9 +4841,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
free_percpu(memcg->stat);
if (sizeof(struct mem_cgroup) < PAGE_SIZE)
- kfree(memcg);
+ kfree_rcu(memcg, rcu_freeing);
else
- vfree(memcg);
+ call_rcu(&memcg->rcu_freeing, vfree_rcu);
}
static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -4885,7 +4925,7 @@ err_cleanup:
}
static struct cgroup_subsys_state * __ref
-mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
+mem_cgroup_create(struct cgroup *cont)
{
struct mem_cgroup *memcg, *parent;
long error = -ENOMEM;
@@ -4947,20 +4987,18 @@ free_out:
return ERR_PTR(error);
}
-static int mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
+static int mem_cgroup_pre_destroy(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
return mem_cgroup_force_empty(memcg, false);
}
-static void mem_cgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cont)
+static void mem_cgroup_destroy(struct cgroup *cont)
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
- kmem_cgroup_destroy(ss, cont);
+ kmem_cgroup_destroy(cont);
mem_cgroup_put(memcg);
}
@@ -5297,9 +5335,8 @@ static void mem_cgroup_clear_mc(void)
mem_cgroup_end_move(from);
}
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup *cgroup,
+ struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
int ret = 0;
@@ -5337,9 +5374,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
return ret;
}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+ struct cgroup_taskset *tset)
{
mem_cgroup_clear_mc();
}
@@ -5454,9 +5490,8 @@ retry:
up_read(&mm->mmap_sem);
}
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
- struct cgroup *cont,
- struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup *cont,
+ struct cgroup_taskset *tset)
{
struct task_struct *p = cgroup_taskset_first(tset);
struct mm_struct *mm = get_task_mm(p);
@@ -5471,20 +5506,17 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
mem_cgroup_clear_mc();
}
#else /* !CONFIG_MMU */
-static int mem_cgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct cgroup_taskset *tset)
+static int mem_cgroup_can_attach(struct cgroup *cgroup,
+ struct cgroup_taskset *tset)
{
return 0;
}
-static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss,
- struct cgroup *cgroup,
- struct cgroup_taskset *tset)
+static void mem_cgroup_cancel_attach(struct cgroup *cgroup,
+ struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_subsys *ss,
- struct cgroup *cont,
- struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(struct cgroup *cont,
+ struct cgroup_taskset *tset)
{
}
#endif
diff --git a/mm/memory.c b/mm/memory.c
index fa2f04e0337c..347e5fad1cfa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
* fails, we just zero-fill it. Live with it.
*/
if (unlikely(!src)) {
- void *kaddr = kmap_atomic(dst, KM_USER0);
+ void *kaddr = kmap_atomic(dst);
void __user *uaddr = (void __user *)(va & PAGE_MASK);
/*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
clear_page(kaddr);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
flush_dcache_page(dst);
} else
copy_user_highpage(dst, src, va, vma);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 06b145fb64ab..47296fee23db 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -640,10 +640,11 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
unsigned long vmstart;
unsigned long vmend;
- vma = find_vma_prev(mm, start, &prev);
+ vma = find_vma(mm, start);
if (!vma || vma->vm_start > start)
return -EFAULT;
+ prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/migrate.c b/mm/migrate.c
index df141f60289e..1503b6b54ecb 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (!newpage)
return -ENOMEM;
- mem_cgroup_reset_owner(newpage);
-
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
goto out;
diff --git a/mm/mlock.c b/mm/mlock.c
index 4f4f53bdc65d..ef726e8aa8e9 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -385,10 +385,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
return -EINVAL;
if (end == start)
return 0;
- vma = find_vma_prev(current->mm, start, &prev);
+ vma = find_vma(current->mm, start);
if (!vma || vma->vm_start > start)
return -ENOMEM;
+ prev = vma->vm_prev;
if (start > vma->vm_start)
prev = vma;
diff --git a/mm/mmap.c b/mm/mmap.c
index 694a8625ab0d..39a68ddf38bd 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1279,8 +1279,9 @@ munmap_back:
vma->vm_pgoff = pgoff;
INIT_LIST_HEAD(&vma->anon_vma_chain);
+ error = -EINVAL; /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
if (file) {
- error = -EINVAL;
if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
goto free_vma;
if (vm_flags & VM_DENYWRITE) {
@@ -1306,6 +1307,8 @@ munmap_back:
pgoff = vma->vm_pgoff;
vm_flags = vma->vm_flags;
} else if (vm_flags & VM_SHARED) {
+ if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+ goto free_vma;
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
@@ -1618,7 +1621,6 @@ EXPORT_SYMBOL(find_vma);
/*
* Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
*/
struct vm_area_struct *
find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1627,7 +1629,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
struct vm_area_struct *vma;
vma = find_vma(mm, addr);
- *pprev = vma ? vma->vm_prev : NULL;
+ if (vma) {
+ *pprev = vma->vm_prev;
+ } else {
+ struct rb_node *rb_node = mm->mm_rb.rb_node;
+ *pprev = NULL;
+ while (rb_node) {
+ *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+ rb_node = rb_node->rb_right;
+ }
+ }
return vma;
}
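
With this change, find_vma_prev() no longer hands back a NULL *pprev when addr lies above every mapping; it walks the rbtree rightwards and returns the highest VMA as the predecessor instead (hence the removal of the old "pprev is set to NULL" note). The callers touched in the mempolicy.c, mlock.c and mprotect.c hunks switch to the simpler find_vma() plus vma->vm_prev. A hypothetical helper, assuming mmap_sem is held, showing what the new contract lets a caller express:

    #include <linux/mm.h>

    /*
     * Return the VMA containing addr, or else the nearest VMA below addr,
     * or NULL when no VMA lies at or below addr.
     */
    static struct vm_area_struct *vma_at_or_below(struct mm_struct *mm,
                                                  unsigned long addr)
    {
            struct vm_area_struct *vma, *prev;

            vma = find_vma_prev(mm, addr, &prev);
            if (vma && vma->vm_start <= addr)
                    return vma;     /* addr falls inside vma */
            return prev;            /* NULL if nothing maps below addr */
    }
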
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 9599fa2d0e92..142ef4a1f480 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -262,10 +262,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
down_write(&current->mm->mmap_sem);
- vma = find_vma_prev(current->mm, start, &prev);
+ vma = find_vma(current->mm, start);
error = -ENOMEM;
if (!vma)
goto out;
+ prev = vma->vm_prev;
if (unlikely(grows & PROT_GROWSDOWN)) {
if (vma->vm_start >= end)
goto out;
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_insert(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
}
/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
if (vma->vm_file) {
mapping = vma->vm_file->f_mapping;
+ mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
vma_prio_tree_remove(vma, &mapping->i_mmap);
flush_dcache_mmap_unlock(mapping);
+ mutex_unlock(&mapping->i_mmap_mutex);
}
/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
if (vma->vm_next)
vma->vm_next->vm_prev = vma->vm_prev;
-
- vma->vm_mm = NULL;
}
/*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
down_write(&nommu_region_sem);
+ mutex_lock(&inode->i_mapping->i_mmap_mutex);
/* search for VMAs that fall within the dead zone */
vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
/* found one - only interested if it's shared out of the page
* cache */
if (vma->vm_flags & VM_SHARED) {
+ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
up_write(&nommu_region_sem);
return -ETXTBSY; /* not quite true, but near enough */
}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
}
}
+ mutex_unlock(&inode->i_mapping->i_mmap_mutex);
up_write(&nommu_region_sem);
return 0;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2186ecb36f7..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
do_div(max, bucketsize);
}
+ max = min(max, 0x80000000ULL);
if (numentries > max)
numentries = max;
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index de1616aa9b1e..1ccbd714059c 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
pgoff_t offset = swp_offset(ent);
struct swap_cgroup_ctrl *ctrl;
struct page *mappage;
+ struct swap_cgroup *sc;
ctrl = &swap_cgroup_ctrl[swp_type(ent)];
if (ctrlp)
*ctrlp = ctrl;
mappage = ctrl->map[offset / SC_PER_PAGE];
- return page_address(mappage) + offset % SC_PER_PAGE;
+ sc = page_address(mappage);
+ return sc + offset % SC_PER_PAGE;
}
/**
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 12a48a88c0d8..405d331804c3 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
page_end - page_start);
}
- for (i = page_start; i < page_end; i++)
- __clear_bit(i, populated);
+ bitmap_clear(populated, page_start, page_end - page_start);
}
/**
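
The percpu-vm.c change is a pure simplification: bitmap_clear() clears the same bit range [page_start, page_end) that the removed __clear_bit() loop did, just operating on whole words at a time. A small sketch of the equivalence, with generic names rather than the pcpu chunk state:

    #include <linux/bitmap.h>
    #include <linux/bitops.h>

    /* Clear bits [start, start + nr) of @map, one bit at a time... */
    static void clear_range_open_coded(unsigned long *map, int start, int nr)
    {
            int i;

            for (i = start; i < start + nr; i++)
                    __clear_bit(i, map);
    }

    /* ...and the equivalent call to the bitmap helper used above. */
    static void clear_range_helper(unsigned long *map, int start, int nr)
    {
            bitmap_clear(map, start, nr);
    }
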
diff --git a/mm/shmem.c b/mm/shmem.c
index d9c293952755..78307d5c5bd9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
}
inode->i_mapping->a_ops = &shmem_aops;
inode->i_op = &shmem_symlink_inode_operations;
- kaddr = kmap_atomic(page, KM_USER0);
+ kaddr = kmap_atomic(page);
memcpy(kaddr, symname, len);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
set_page_dirty(page);
unlock_page(page);
page_cache_release(page);
diff --git a/mm/swap.c b/mm/swap.c
index fff1ff7fb9ad..14380e9fbe33 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
void lru_add_page_tail(struct zone* zone,
struct page *page, struct page *page_tail)
{
- int active;
+ int uninitialized_var(active);
enum lru_list lru;
const int file = 0;
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
active = 0;
lru = LRU_INACTIVE_ANON;
}
- update_page_reclaim_stat(zone, page_tail, file, active);
} else {
SetPageUnevictable(page_tail);
lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
list_head = page_tail->lru.prev;
list_move_tail(&page_tail->lru, list_head);
}
+
+ if (!PageUnevictable(page))
+ update_page_reclaim_stat(zone, page_tail, file, active);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
SetPageLRU(page);
if (active)
SetPageActive(page);
- update_page_reclaim_stat(zone, page, file, active);
add_page_to_lru_list(zone, page, lru);
+ update_page_reclaim_stat(zone, page, file, active);
}
/*
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 470038a91873..ea6b32d61873 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
new_page = alloc_page_vma(gfp_mask, vma, addr);
if (!new_page)
break; /* Out of memory */
- /*
- * The memcg-specific accounting when moving
- * pages around the LRU lists relies on the
- * page's owner (memcg) to be valid. Usually,
- * pages are assigned to a new owner before
- * being put on the LRU list, but since this
- * is not the case here, the stale owner from
- * a previous allocation cycle must be reset.
- */
- mem_cgroup_reset_owner(new_page);
}
/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index f0d79296dd55..6bf67ab6e469 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2429,9 +2429,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
if (!(count & COUNT_CONTINUED))
goto out;
- map = kmap_atomic(list_page, KM_USER0) + offset;
+ map = kmap_atomic(list_page) + offset;
count = *map;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
/*
* If this continuation count now has some space in it,
@@ -2474,7 +2474,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
offset &= ~PAGE_MASK;
page = list_entry(head->lru.next, struct page, lru);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
goto init_map; /* jump over SWAP_CONT_MAX checks */
@@ -2484,26 +2484,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
* Think of how you add 1 to 999
*/
while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
}
if (*map == SWAP_CONT_MAX) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
if (page == head)
return false; /* add count continuation */
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
init_map: *map = 0; /* we didn't zero the page */
}
*map += 1;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
*map = COUNT_CONTINUED;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return true; /* incremented */
@@ -2514,22 +2514,22 @@ init_map: *map = 0; /* we didn't zero the page */
*/
BUG_ON(count != COUNT_CONTINUED);
while (*map == COUNT_CONTINUED) {
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.next, struct page, lru);
BUG_ON(page == head);
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
}
BUG_ON(*map == 0);
*map -= 1;
if (*map == 0)
count = 0;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
while (page != head) {
- map = kmap_atomic(page, KM_USER0) + offset;
+ map = kmap_atomic(page) + offset;
*map = SWAP_CONT_MAX | count;
count = COUNT_CONTINUED;
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
page = list_entry(page->lru.prev, struct page, lru);
}
return count == COUNT_CONTINUED;
diff --git a/mm/truncate.c b/mm/truncate.c
index 632b15e29f74..a188058582e0 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -184,7 +184,7 @@ int invalidate_inode_page(struct page *page)
}
/**
- * truncate_inode_pages - truncate range of pages specified by start & end byte offsets
+ * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
* @mapping: mapping to truncate
* @lstart: offset from which to truncate
* @lend: offset to which to truncate
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86ce9a526c17..94dff883b449 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
* we can expect USER0 is not used (see vread/vwrite's
* function description)
*/
- void *map = kmap_atomic(p, KM_USER0);
+ void *map = kmap_atomic(p);
memcpy(buf, map + offset, length);
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
} else
memset(buf, 0, length);
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
* we can expect USER0 is not used (see vread/vwrite's
* function description)
*/
- void *map = kmap_atomic(p, KM_USER0);
+ void *map = kmap_atomic(p);
memcpy(map + offset, buf, length);
- kunmap_atomic(map, KM_USER0);
+ kunmap_atomic(map);
}
addr += length;
buf += length;