Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     2
-rw-r--r--  mm/memblock.c    6
-rw-r--r--  mm/memcontrol.c  5
-rw-r--r--  mm/mmap.c        2
-rw-r--r--  mm/nommu.c       9
-rw-r--r--  mm/page_alloc.c  1
6 files changed, 18 insertions, 7 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f34bd8dda34..a876871f6be5 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 			set_page_dirty(page);
 		list_add(&page->lru, &page_list);
 	}
-	spin_unlock(&mm->page_table_lock);
 	flush_tlb_range(vma, start, end);
+	spin_unlock(&mm->page_table_lock);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 	list_for_each_entry_safe(page, tmp, &page_list, lru) {
 		page_remove_rmap(page);
diff --git a/mm/memblock.c b/mm/memblock.c
index 77b5f227e1d8..99f285599501 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -99,9 +99,6 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
 
-	/* align @size to avoid excessive fragmentation on reserved array */
-	size = round_up(size, align);
-
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
 		end = memblock.current_limit;
@@ -731,6 +728,9 @@ static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 {
 	phys_addr_t found;
 
+	/* align @size to avoid excessive fragmentation on reserved array */
+	size = round_up(size, align);
+
 	found = memblock_find_in_range_node(0, max_addr, size, align, nid);
 	if (found && !memblock_reserve(found, size))
 		return found;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6728a7ae6f2d..228d6461c12a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4414,6 +4414,9 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
 	 */
 	BUG_ON(!thresholds);
 
+	if (!thresholds->primary)
+		goto unlock;
+
 	usage = mem_cgroup_usage(memcg, type == _MEMSWAP);
 
 	/* Check if a threshold crossed before removing */
@@ -4462,7 +4465,7 @@ swap_buffers:
 
 	/* To be sure that nobody uses thresholds */
 	synchronize_rcu();
-
+unlock:
 	mutex_unlock(&memcg->thresholds_lock);
 }
 
diff --git a/mm/mmap.c b/mm/mmap.c
index 3f758c7f4c81..22e1a0b2f70c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1293,6 +1293,8 @@ munmap_back:
 		pgoff = vma->vm_pgoff;
 		vm_flags = vma->vm_flags;
 	} else if (vm_flags & VM_SHARED) {
+		if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+			goto free_vma;
 		error = shmem_zero_setup(vma);
 		if (error)
 			goto free_vma;
diff --git a/mm/nommu.c b/mm/nommu.c
index b982290fd962..f59e170fceb4 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -775,8 +779,6 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 
 	if (vma->vm_next)
 		vma->vm_next->vm_prev = vma->vm_prev;
-
-	vma->vm_mm = NULL;
 }
 
 /*
@@ -2052,6 +2054,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2059,6 +2062,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2086,6 +2090,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d2186ecb36f7..a13ded1938f0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5236,6 +5236,7 @@ void *__init alloc_large_system_hash(const char *tablename,
 		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
 		do_div(max, bucketsize);
 	}
+	max = min(max, 0x80000000ULL);
 
 	if (numentries > max)
 		numentries = max;
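For context on the mm/mmap.c hunk above: a shared anonymous mapping that also carries VM_GROWSDOWN or VM_GROWSUP is now rejected before shmem_zero_setup() is reached. Below is a minimal userspace sketch, not part of the commit, showing the mapping combination that takes the new goto free_vma path; the exact errno is kernel-dependent, so it only checks whether the mmap() call fails.

/* Illustrative only: MAP_SHARED | MAP_ANONYMOUS | MAP_GROWSDOWN is the
 * userspace combination that maps to the rejected VM_SHARED + VM_GROWSDOWN
 * case in mmap_region().  On a patched kernel the call is expected to fail;
 * on an unpatched one it may succeed.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_GROWSDOWN, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");	/* expected with the mm/mmap.c change applied */
		return 0;
	}

	/* Unpatched behaviour: the shared anonymous GROWSDOWN mapping exists. */
	printf("mapping created at %p\n", p);
	munmap(p, 4096);
	return 0;
}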