Diffstat (limited to 'mm')
-rw-r--r--   mm/backing-dev.c     | 18
-rw-r--r--   mm/kmemleak.c        |  3
-rw-r--r--   mm/memcontrol.c      |  6
-rw-r--r--   mm/memory_hotplug.c  |  4
-rw-r--r--   mm/mempolicy.c       |  2
-rw-r--r--   mm/page_isolation.c  |  3
-rw-r--r--   mm/shmem.c           |  8
-rw-r--r--   mm/zsmalloc.c        |  3
8 files changed, 21 insertions, 26 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af..000e7b3b9896 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 	flush_delayed_work(&bdi->wb.dwork);
 }
 
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment. In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-	if (WARN_ON_ONCE(!bdi->dev))
-		return;
-
-	bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
 	memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	int i;
 
 	bdi_wb_shutdown(bdi);
+	bdi_set_min_ratio(bdi, 0);
 
 	WARN_ON(!list_empty(&bdi->work_list));
 	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5405aff5a590..f0fe4f2c1fa7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -115,7 +115,8 @@
 #define BYTES_PER_POINTER	sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+					   __GFP_NOACCOUNT)) | \
 				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 				 __GFP_NOWARN)
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 14c2f2017e37..a04225d372ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2323,6 +2323,8 @@ done_restock:
 	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);
+	if (!(gfp_mask & __GFP_WAIT))
+		goto done;
 	/*
 	 * If the hierarchy is above the normal consumption range,
 	 * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	if (!mem_cgroup_is_root(memcg))
 		page_counter_uncharge(&memcg->memory, 1);
 
-	/* XXX: caller holds IRQ-safe mapping->tree_lock */
-	VM_BUG_ON(!irqs_disabled());
-
+	/* Caller disabled preemption with mapping->tree_lock */
 	mem_cgroup_charge_statistics(memcg, page, -1);
 	memcg_check_events(memcg, page);
 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 457bde530cbe..9e88f749aa51 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
 		 * wait_table may be allocated from boot memory,
 		 * here only free if it's allocated by vmalloc.
 		 */
-		if (is_vmalloc_addr(zone->wait_table))
+		if (is_vmalloc_addr(zone->wait_table)) {
 			vfree(zone->wait_table);
+			zone->wait_table = NULL;
+		}
 	}
 }
 EXPORT_SYMBOL(try_offline_node);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ede26291d4aa..747743237d9f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2518,7 +2518,7 @@ static void __init check_numabalancing_enable(void)
 	if (numabalancing_override)
 		set_numabalancing_state(numabalancing_override == 1);
 
-	if (nr_node_ids > 1 && !numabalancing_override) {
+	if (num_online_nodes() > 1 && !numabalancing_override) {
 		pr_info("%s automatic NUMA balancing. "
 			"Configure with numa_balancing= or the "
 			"kernel.numa_balancing sysctl",
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 755a42c76eb4..303c908790ef 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -101,7 +101,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
 		buddy_idx = __find_buddy_index(page_idx, order);
 		buddy = page + (buddy_idx - page_idx);
 
-		if (!is_migrate_isolate_page(buddy)) {
+		if (pfn_valid_within(page_to_pfn(buddy)) &&
+		    !is_migrate_isolate_page(buddy)) {
 			__isolate_free_page(page, order);
 			kernel_map_pages(page, (1 << order), 1);
 			set_page_refcounted(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index a59087edf728..3759099d8ce4 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3395,7 +3395,13 @@ int shmem_zero_setup(struct vm_area_struct *vma)
 	struct file *file;
 	loff_t size = vma->vm_end - vma->vm_start;
 
-	file = shmem_file_setup("dev/zero", size, vma->vm_flags);
+	/*
+	 * Cloning a new file under mmap_sem leads to a lock ordering conflict
+	 * between XFS directory reading and selinux: since this file is only
+	 * accessible to the user through its mapping, use S_PRIVATE flag to
+	 * bypass file security, in the same way as shmem_kernel_file_setup().
+	 */
+	file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
 	if (IS_ERR(file))
 		return PTR_ERR(file);
 
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 08bd7a3d464a..a8b5e749e84e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-	kmem_cache_destroy(pool->handle_cachep);
+	if (pool->handle_cachep)
+		kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)