From 58cb65487e92b47448d00a711c9f5922137d5678 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:25:54 -0700 Subject: proc/maps: make vm_is_stack() logic namespace-friendly - Rename vm_is_stack() to task_of_stack() and change it to return "struct task_struct *" rather than the global (and thus wrong in general) pid_t. - Add the new pid_of_stack() helper which calls task_of_stack() and uses the right namespace to report the correct pid_t. Unfortunately we need to define this helper twice, in task_mmu.c and in task_nommu.c. perhaps it makes sense to add fs/proc/util.c and move at least pid_of_stack/task_of_stack there to avoid the code duplication. - Change show_map_vma() and show_numa_map() to use the new helper. Signed-off-by: Oleg Nesterov Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: Greg Ungerer Cc: "Kirill A. Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 0f4196a0bc20..28df70774b81 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1247,8 +1247,8 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma, !vma_growsup(vma->vm_next, addr); } -extern pid_t -vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group); +extern struct task_struct *task_of_stack(struct task_struct *task, + struct vm_area_struct *vma, bool in_group); extern unsigned long move_page_tables(struct vm_area_struct *vma, unsigned long old_addr, struct vm_area_struct *new_vma, -- cgit v1.2.3 From 07f361b2bee38896df8be17d8c3f8af3f3610606 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:00 -0700 Subject: mm/slab_common: move kmem_cache definition to internal header We don't need to keep kmem_cache definition in include/linux/slab.h if we don't need to inline kmem_cache_size(). According to my code inspection, this function is only called at lc_create() in lib/lru_cache.c which may be called at initialization phase of something, so we don't need to inline it. Therfore, move it to slab_common.c and move kmem_cache definition to internal header. After this change, we can change kmem_cache definition easily without full kernel build. For instance, we can turn on/off CONFIG_SLUB_STATS without full kernel build. [akpm@linux-foundation.org: export kmem_cache_size() to modules] [rdunlap@infradead.org: add header files to fix kmemcheck.c build errors] Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Randy Dunlap Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 42 +----------------------------------------- mm/kmemcheck.c | 1 + mm/slab.h | 35 +++++++++++++++++++++++++++++++++++ mm/slab_common.c | 9 +++++++++ 4 files changed, 46 insertions(+), 41 deletions(-) (limited to 'include') diff --git a/include/linux/slab.h b/include/linux/slab.h index 1d9abb7d22a0..9062e4ad1787 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -158,31 +158,6 @@ size_t ksize(const void *); #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long) #endif -#ifdef CONFIG_SLOB -/* - * Common fields provided in kmem_cache by all slab allocators - * This struct is either used directly by the allocator (SLOB) - * or the allocator must include definitions for all fields - * provided in kmem_cache_common in their definition of kmem_cache. 
- * - * Once we can do anonymous structs (C11 standard) we could put a - * anonymous struct definition in these allocators so that the - * separate allocations in the kmem_cache structure of SLAB and - * SLUB is no longer needed. - */ -struct kmem_cache { - unsigned int object_size;/* The original size of the object */ - unsigned int size; /* The aligned/padded/added on size */ - unsigned int align; /* Alignment as calculated */ - unsigned long flags; /* Active flags on the slab */ - const char *name; /* Slab name for sysfs */ - int refcount; /* Use counter */ - void (*ctor)(void *); /* Called on object slot creation */ - struct list_head list; /* List of all slab caches on the system */ -}; - -#endif /* CONFIG_SLOB */ - /* * Kmalloc array related definitions */ @@ -363,14 +338,6 @@ kmem_cache_alloc_node_trace(struct kmem_cache *s, } #endif /* CONFIG_TRACING */ -#ifdef CONFIG_SLAB -#include -#endif - -#ifdef CONFIG_SLUB -#include -#endif - extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order); #ifdef CONFIG_TRACING @@ -650,14 +617,7 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node) return kmalloc_node(size, flags | __GFP_ZERO, node); } -/* - * Determine the size of a slab object - */ -static inline unsigned int kmem_cache_size(struct kmem_cache *s) -{ - return s->object_size; -} - +unsigned int kmem_cache_size(struct kmem_cache *s); void __init kmem_cache_init_late(void); #endif /* _LINUX_SLAB_H */ diff --git a/mm/kmemcheck.c b/mm/kmemcheck.c index fd814fd61319..cab58bb592d8 100644 --- a/mm/kmemcheck.c +++ b/mm/kmemcheck.c @@ -2,6 +2,7 @@ #include #include #include +#include "slab.h" #include void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node) diff --git a/mm/slab.h b/mm/slab.h index 0e0fdd365840..026e7c393f0b 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -4,6 +4,41 @@ * Internal slab definitions */ +#ifdef CONFIG_SLOB +/* + * Common fields provided in kmem_cache by all slab allocators + * This struct is either used directly by the allocator (SLOB) + * or the allocator must include definitions for all fields + * provided in kmem_cache_common in their definition of kmem_cache. + * + * Once we can do anonymous structs (C11 standard) we could put a + * anonymous struct definition in these allocators so that the + * separate allocations in the kmem_cache structure of SLAB and + * SLUB is no longer needed. + */ +struct kmem_cache { + unsigned int object_size;/* The original size of the object */ + unsigned int size; /* The aligned/padded/added on size */ + unsigned int align; /* Alignment as calculated */ + unsigned long flags; /* Active flags on the slab */ + const char *name; /* Slab name for sysfs */ + int refcount; /* Use counter */ + void (*ctor)(void *); /* Called on object slot creation */ + struct list_head list; /* List of all slab caches on the system */ +}; + +#endif /* CONFIG_SLOB */ + +#ifdef CONFIG_SLAB +#include +#endif + +#ifdef CONFIG_SLUB +#include +#endif + +#include + /* * State of the slab allocator. 
* diff --git a/mm/slab_common.c b/mm/slab_common.c index cabb842c4e7c..d7d8ffd0c306 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -30,6 +30,15 @@ LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; +/* + * Determine the size of a slab object + */ +unsigned int kmem_cache_size(struct kmem_cache *s) +{ + return s->object_size; +} +EXPORT_SYMBOL(kmem_cache_size); + #ifdef CONFIG_DEBUG_VM static int kmem_cache_sanity_check(const char *name, size_t size) { -- cgit v1.2.3 From 61f47105a2c9c60e950ca808b7560f776f9bfa31 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:02 -0700 Subject: mm/sl[ao]b: always track caller in kmalloc_(node_)track_caller() Now, we track caller if tracing or slab debugging is enabled. If they are disabled, we could save one argument passing overhead by calling __kmalloc(_node)(). But, I think that it would be marginal. Furthermore, default slab allocator, SLUB, doesn't use this technique so I think that it's okay to change this situation. After this change, we can turn on/off CONFIG_DEBUG_SLAB without full kernel build and remove some complicated '#if' defintion. It looks more benefitial to me. Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Zhang Yanfei Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 22 ---------------------- mm/slab.c | 18 ------------------ mm/slob.c | 2 -- 3 files changed, 42 deletions(-) (limited to 'include') diff --git a/include/linux/slab.h b/include/linux/slab.h index 9062e4ad1787..c265bec6a57d 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -549,37 +549,15 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) * allocator where we care about the real place the memory allocation * request comes from. */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long); #define kmalloc_track_caller(size, flags) \ __kmalloc_track_caller(size, flags, _RET_IP_) -#else -#define kmalloc_track_caller(size, flags) \ - __kmalloc(size, flags) -#endif /* DEBUG_SLAB */ #ifdef CONFIG_NUMA -/* - * kmalloc_node_track_caller is a special version of kmalloc_node that - * records the calling function of the routine calling it for slab leak - * tracking instead of just the calling function (confusing, eh?). - * It's useful when the call to kmalloc_node comes from a widely-used - * standard allocator where we care about the real place the memory - * allocation request comes from. 
- */ -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \ - (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \ - (defined(CONFIG_SLOB) && defined(CONFIG_TRACING)) extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long); #define kmalloc_node_track_caller(size, flags, node) \ __kmalloc_node_track_caller(size, flags, node, \ _RET_IP_) -#else -#define kmalloc_node_track_caller(size, flags, node) \ - __kmalloc_node(size, flags, node) -#endif #else /* CONFIG_NUMA */ diff --git a/mm/slab.c b/mm/slab.c index 7c52b3890d25..c52bc5aa6ba0 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -3496,7 +3496,6 @@ __do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller) return kmem_cache_alloc_node_trace(cachep, flags, node, size); } -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc_node(size_t size, gfp_t flags, int node) { return __do_kmalloc_node(size, flags, node, _RET_IP_); @@ -3509,13 +3508,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t flags, return __do_kmalloc_node(size, flags, node, caller); } EXPORT_SYMBOL(__kmalloc_node_track_caller); -#else -void *__kmalloc_node(size_t size, gfp_t flags, int node) -{ - return __do_kmalloc_node(size, flags, node, 0); -} -EXPORT_SYMBOL(__kmalloc_node); -#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */ #endif /* CONFIG_NUMA */ /** @@ -3541,8 +3533,6 @@ static __always_inline void *__do_kmalloc(size_t size, gfp_t flags, return ret; } - -#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING) void *__kmalloc(size_t size, gfp_t flags) { return __do_kmalloc(size, flags, _RET_IP_); @@ -3555,14 +3545,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller) } EXPORT_SYMBOL(__kmalloc_track_caller); -#else -void *__kmalloc(size_t size, gfp_t flags) -{ - return __do_kmalloc(size, flags, 0); -} -EXPORT_SYMBOL(__kmalloc); -#endif - /** * kmem_cache_free - Deallocate an object * @cachep: The cache the allocation was from. diff --git a/mm/slob.c b/mm/slob.c index 21980e0f39a8..96a86206a26b 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -468,7 +468,6 @@ void *__kmalloc(size_t size, gfp_t gfp) } EXPORT_SYMBOL(__kmalloc); -#ifdef CONFIG_TRACING void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) { return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, caller); @@ -481,7 +480,6 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfp, return __do_kmalloc_node(size, gfp, node, caller); } #endif -#endif void kfree(const void *block) { -- cgit v1.2.3 From ad2c8144418c6a81cefe65379fd47bbe8344cef2 Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:13 -0700 Subject: topology: add support for node_to_mem_node() to determine the fallback node Anton noticed (http://www.spinics.net/lists/linux-mm/msg67489.html) that on ppc LPARs with memoryless nodes, a large amount of memory was consumed by slabs and was marked unreclaimable. He tracked it down to slab deactivations in the SLUB core when we allocate remotely, leading to poor efficiency always when memoryless nodes are present. After much discussion, Joonsoo provided a few patches that help significantly. They don't resolve the problem altogether: - memory hotplug still needs testing, that is when a memoryless node becomes memory-ful, we want to dtrt - there are other reasons for going off-node than memoryless nodes, e.g., fully exhausted local nodes Neither case is resolved with this series, but I don't think that should block their acceptance, as they can be explored/resolved with follow-on patches. 
The series consists of: [1/3] topology: add support for node_to_mem_node() to determine the fallback node [2/3] slub: fallback to node_to_mem_node() node if allocating on memoryless node - Joonsoo's patches to cache the nearest node with memory for each NUMA node [3/3] Partial revert of 81c98869faa5 (""kthread: ensure locality of task_struct allocations") - At Tejun's request, keep the knowledge of memoryless node fallback to the allocator core. This patch (of 3): We need to determine the fallback node in slub allocator if the allocation target node is memoryless node. Without it, the SLUB wrongly select the node which has no memory and can't use a partial slab, because of node mismatch. Introduced function, node_to_mem_node(X), will return a node Y with memory that has the nearest distance. If X is memoryless node, it will return nearest distance node, but, if X is normal node, it will return itself. We will use this function in following patch to determine the fallback node. Signed-off-by: Joonsoo Kim Signed-off-by: Nishanth Aravamudan Cc: David Rientjes Cc: Han Pingtian Cc: Pekka Enberg Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Cc: Anton Blanchard Cc: Christoph Lameter Cc: Wanpeng Li Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/topology.h | 17 +++++++++++++++++ mm/page_alloc.c | 1 + 2 files changed, 18 insertions(+) (limited to 'include') diff --git a/include/linux/topology.h b/include/linux/topology.h index dda6ee521e74..909b6e43b694 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -119,11 +119,20 @@ static inline int numa_node_id(void) * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem(). */ DECLARE_PER_CPU(int, _numa_mem_); +extern int _node_numa_mem_[MAX_NUMNODES]; #ifndef set_numa_mem static inline void set_numa_mem(int node) { this_cpu_write(_numa_mem_, node); + _node_numa_mem_[numa_node_id()] = node; +} +#endif + +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return _node_numa_mem_[node]; } #endif @@ -146,6 +155,7 @@ static inline int cpu_to_mem(int cpu) static inline void set_cpu_numa_mem(int cpu, int node) { per_cpu(_numa_mem_, cpu) = node; + _node_numa_mem_[cpu_to_node(cpu)] = node; } #endif @@ -159,6 +169,13 @@ static inline int numa_mem_id(void) } #endif +#ifndef node_to_mem_node +static inline int node_to_mem_node(int node) +{ + return node; +} +#endif + #ifndef cpu_to_mem static inline int cpu_to_mem(int cpu) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index eee961958021..f3bc59f2ed52 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -85,6 +85,7 @@ EXPORT_PER_CPU_SYMBOL(numa_node); */ DEFINE_PER_CPU(int, _numa_mem_); /* Kernel "local memory" node */ EXPORT_PER_CPU_SYMBOL(_numa_mem_); +int _node_numa_mem_[MAX_NUMNODES]; #endif /* -- cgit v1.2.3 From bf0dea23a9c094ae869a88bb694fbe966671bf6d Mon Sep 17 00:00:00 2001 From: Joonsoo Kim Date: Thu, 9 Oct 2014 15:26:27 -0700 Subject: mm/slab: use percpu allocator for cpu cache Because of chicken and egg problem, initialization of SLAB is really complicated. We need to allocate cpu cache through SLAB to make the kmem_cache work, but before initialization of kmem_cache, allocation through SLAB is impossible. On the other hand, SLUB does initialization in a more simple way. It uses percpu allocator to allocate cpu cache so there is no chicken and egg problem. So, this patch try to use percpu allocator in SLAB. 
This simplifies the initialization step in SLAB so that we could maintain SLAB code more easily. In my testing there is no performance difference. This implementation relies on percpu allocator. Because percpu allocator uses vmalloc address space, vmalloc address space could be exhausted by this change on many cpu system with *32 bit* kernel. This implementation can cover 1024 cpus in worst case by following calculation. Worst: 1024 cpus * 4 bytes for pointer * 300 kmem_caches * 120 objects per cpu_cache = 140 MB Normal: 1024 cpus * 4 bytes for pointer * 150 kmem_caches(slab merge) * 80 objects per cpu_cache = 46 MB Signed-off-by: Joonsoo Kim Acked-by: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Jeremiah Mahler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab_def.h | 20 +--- mm/slab.c | 239 +++++++++++++++-------------------------------- mm/slab.h | 1 - 3 files changed, 78 insertions(+), 182 deletions(-) (limited to 'include') diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 8235dfbb3b05..b869d1662ba3 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -8,6 +8,8 @@ */ struct kmem_cache { + struct array_cache __percpu *cpu_cache; + /* 1) Cache tunables. Protected by slab_mutex */ unsigned int batchcount; unsigned int limit; @@ -71,23 +73,7 @@ struct kmem_cache { struct memcg_cache_params *memcg_params; #endif -/* 6) per-cpu/per-node data, touched during every alloc/free */ - /* - * We put array[] at the end of kmem_cache, because we want to size - * this array to nr_cpu_ids slots instead of NR_CPUS - * (see kmem_cache_init()) - * We still use [NR_CPUS] and not [1] or [0] because cache_cache - * is statically defined, so we reserve the max number of cpus. - * - * We also need to guarantee that the list is able to accomodate a - * pointer for each node since "nodelists" uses the remainder of - * available pointers. - */ - struct kmem_cache_node **node; - struct array_cache *array[NR_CPUS + MAX_NUMNODES]; - /* - * Do not add fields after array[] - */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; #endif /* _LINUX_SLAB_DEF_H */ diff --git a/mm/slab.c b/mm/slab.c index 328233a724af..655d65c3f010 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -237,11 +237,10 @@ struct arraycache_init { /* * Need this for bootstrapping a per node allocator. 
*/ -#define NUM_INIT_LISTS (3 * MAX_NUMNODES) +#define NUM_INIT_LISTS (2 * MAX_NUMNODES) static struct kmem_cache_node __initdata init_kmem_cache_node[NUM_INIT_LISTS]; #define CACHE_CACHE 0 -#define SIZE_AC MAX_NUMNODES -#define SIZE_NODE (2 * MAX_NUMNODES) +#define SIZE_NODE (MAX_NUMNODES) static int drain_freelist(struct kmem_cache *cache, struct kmem_cache_node *n, int tofree); @@ -253,7 +252,6 @@ static void cache_reap(struct work_struct *unused); static int slab_early_init = 1; -#define INDEX_AC kmalloc_index(sizeof(struct arraycache_init)) #define INDEX_NODE kmalloc_index(sizeof(struct kmem_cache_node)) static void kmem_cache_node_init(struct kmem_cache_node *parent) @@ -458,9 +456,6 @@ static inline unsigned int obj_to_index(const struct kmem_cache *cache, return reciprocal_divide(offset, cache->reciprocal_buffer_size); } -static struct arraycache_init initarray_generic = - { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; - /* internal cache of cache description objs */ static struct kmem_cache kmem_cache_boot = { .batchcount = 1, @@ -476,7 +471,7 @@ static DEFINE_PER_CPU(struct delayed_work, slab_reap_work); static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep) { - return cachep->array[smp_processor_id()]; + return this_cpu_ptr(cachep->cpu_cache); } static size_t calculate_freelist_size(int nr_objs, size_t align) @@ -1096,24 +1091,25 @@ static void cpuup_canceled(long cpu) struct alien_cache **alien; LIST_HEAD(list); - /* cpu is dead; no one can alloc from it. */ - nc = cachep->array[cpu]; - cachep->array[cpu] = NULL; n = get_node(cachep, node); - if (!n) - goto free_array_cache; + continue; spin_lock_irq(&n->list_lock); /* Free limit for this kmem_cache_node */ n->free_limit -= cachep->batchcount; - if (nc) + + /* cpu is dead; no one can alloc from it. */ + nc = per_cpu_ptr(cachep->cpu_cache, cpu); + if (nc) { free_block(cachep, nc->entry, nc->avail, node, &list); + nc->avail = 0; + } if (!cpumask_empty(mask)) { spin_unlock_irq(&n->list_lock); - goto free_array_cache; + goto free_slab; } shared = n->shared; @@ -1133,9 +1129,9 @@ static void cpuup_canceled(long cpu) drain_alien_cache(cachep, alien); free_alien_cache(alien); } -free_array_cache: + +free_slab: slabs_destroy(cachep, &list); - kfree(nc); } /* * In the previous loop, all the objects were freed to @@ -1172,32 +1168,23 @@ static int cpuup_prepare(long cpu) * array caches */ list_for_each_entry(cachep, &slab_caches, list) { - struct array_cache *nc; struct array_cache *shared = NULL; struct alien_cache **alien = NULL; - nc = alloc_arraycache(node, cachep->limit, - cachep->batchcount, GFP_KERNEL); - if (!nc) - goto bad; if (cachep->shared) { shared = alloc_arraycache(node, cachep->shared * cachep->batchcount, 0xbaadf00d, GFP_KERNEL); - if (!shared) { - kfree(nc); + if (!shared) goto bad; - } } if (use_alien_caches) { alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL); if (!alien) { kfree(shared); - kfree(nc); goto bad; } } - cachep->array[cpu] = nc; n = get_node(cachep, node); BUG_ON(!n); @@ -1388,15 +1375,6 @@ static void __init set_up_node(struct kmem_cache *cachep, int index) } } -/* - * The memory after the last cpu cache pointer is used for the - * the node pointer. - */ -static void setup_node_pointer(struct kmem_cache *cachep) -{ - cachep->node = (struct kmem_cache_node **)&cachep->array[nr_cpu_ids]; -} - /* * Initialisation. Called after the page allocator have been initialised and * before smp_init(). 
@@ -1408,7 +1386,6 @@ void __init kmem_cache_init(void) BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)); kmem_cache = &kmem_cache_boot; - setup_node_pointer(kmem_cache); if (num_possible_nodes() == 1) use_alien_caches = 0; @@ -1416,8 +1393,6 @@ void __init kmem_cache_init(void) for (i = 0; i < NUM_INIT_LISTS; i++) kmem_cache_node_init(&init_kmem_cache_node[i]); - set_up_node(kmem_cache, CACHE_CACHE); - /* * Fragmentation resistance on low memory - only use bigger * page orders on machines with more than 32MB of memory if @@ -1452,49 +1427,22 @@ void __init kmem_cache_init(void) * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids */ create_boot_cache(kmem_cache, "kmem_cache", - offsetof(struct kmem_cache, array[nr_cpu_ids]) + + offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), SLAB_HWCACHE_ALIGN); list_add(&kmem_cache->list, &slab_caches); - - /* 2+3) create the kmalloc caches */ + slab_state = PARTIAL; /* - * Initialize the caches that provide memory for the array cache and the - * kmem_cache_node structures first. Without this, further allocations will - * bug. + * Initialize the caches that provide memory for the kmem_cache_node + * structures first. Without this, further allocations will bug. */ - - kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac", - kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS); - - if (INDEX_AC != INDEX_NODE) - kmalloc_caches[INDEX_NODE] = - create_kmalloc_cache("kmalloc-node", + kmalloc_caches[INDEX_NODE] = create_kmalloc_cache("kmalloc-node", kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); + slab_state = PARTIAL_NODE; slab_early_init = 0; - /* 4) Replace the bootstrap head arrays */ - { - struct array_cache *ptr; - - ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - - memcpy(ptr, cpu_cache_get(kmem_cache), - sizeof(struct arraycache_init)); - - kmem_cache->array[smp_processor_id()] = ptr; - - ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT); - - BUG_ON(cpu_cache_get(kmalloc_caches[INDEX_AC]) - != &initarray_generic.cache); - memcpy(ptr, cpu_cache_get(kmalloc_caches[INDEX_AC]), - sizeof(struct arraycache_init)); - - kmalloc_caches[INDEX_AC]->array[smp_processor_id()] = ptr; - } /* 5) Replace the bootstrap kmem_cache_node */ { int nid; @@ -1502,13 +1450,8 @@ void __init kmem_cache_init(void) for_each_online_node(nid) { init_list(kmem_cache, &init_kmem_cache_node[CACHE_CACHE + nid], nid); - init_list(kmalloc_caches[INDEX_AC], - &init_kmem_cache_node[SIZE_AC + nid], nid); - - if (INDEX_AC != INDEX_NODE) { - init_list(kmalloc_caches[INDEX_NODE], + init_list(kmalloc_caches[INDEX_NODE], &init_kmem_cache_node[SIZE_NODE + nid], nid); - } } } @@ -2041,56 +1984,53 @@ static size_t calculate_slab_order(struct kmem_cache *cachep, return left_over; } +static struct array_cache __percpu *alloc_kmem_cache_cpus( + struct kmem_cache *cachep, int entries, int batchcount) +{ + int cpu; + size_t size; + struct array_cache __percpu *cpu_cache; + + size = sizeof(void *) * entries + sizeof(struct array_cache); + cpu_cache = __alloc_percpu(size, 0); + + if (!cpu_cache) + return NULL; + + for_each_possible_cpu(cpu) { + init_arraycache(per_cpu_ptr(cpu_cache, cpu), + entries, batchcount); + } + + return cpu_cache; +} + static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp) { if (slab_state >= FULL) return enable_cpucache(cachep, gfp); + cachep->cpu_cache = alloc_kmem_cache_cpus(cachep, 1, 1); + if (!cachep->cpu_cache) + return 1; + if (slab_state == DOWN) { - /* - * Note: 
Creation of first cache (kmem_cache). - * The setup_node is taken care - * of by the caller of __kmem_cache_create - */ - cachep->array[smp_processor_id()] = &initarray_generic.cache; - slab_state = PARTIAL; + /* Creation of first cache (kmem_cache). */ + set_up_node(kmem_cache, CACHE_CACHE); } else if (slab_state == PARTIAL) { - /* - * Note: the second kmem_cache_create must create the cache - * that's used by kmalloc(24), otherwise the creation of - * further caches will BUG(). - */ - cachep->array[smp_processor_id()] = &initarray_generic.cache; - - /* - * If the cache that's used by kmalloc(sizeof(kmem_cache_node)) is - * the second cache, then we need to set up all its node/, - * otherwise the creation of further caches will BUG(). - */ - set_up_node(cachep, SIZE_AC); - if (INDEX_AC == INDEX_NODE) - slab_state = PARTIAL_NODE; - else - slab_state = PARTIAL_ARRAYCACHE; + /* For kmem_cache_node */ + set_up_node(cachep, SIZE_NODE); } else { - /* Remaining boot caches */ - cachep->array[smp_processor_id()] = - kmalloc(sizeof(struct arraycache_init), gfp); + int node; - if (slab_state == PARTIAL_ARRAYCACHE) { - set_up_node(cachep, SIZE_NODE); - slab_state = PARTIAL_NODE; - } else { - int node; - for_each_online_node(node) { - cachep->node[node] = - kmalloc_node(sizeof(struct kmem_cache_node), - gfp, node); - BUG_ON(!cachep->node[node]); - kmem_cache_node_init(cachep->node[node]); - } + for_each_online_node(node) { + cachep->node[node] = kmalloc_node( + sizeof(struct kmem_cache_node), gfp, node); + BUG_ON(!cachep->node[node]); + kmem_cache_node_init(cachep->node[node]); } } + cachep->node[numa_mem_id()]->next_reap = jiffies + REAPTIMEOUT_NODE + ((unsigned long)cachep) % REAPTIMEOUT_NODE; @@ -2213,7 +2153,6 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags) else gfp = GFP_NOWAIT; - setup_node_pointer(cachep); #if DEBUG /* @@ -2470,8 +2409,7 @@ int __kmem_cache_shutdown(struct kmem_cache *cachep) if (rc) return rc; - for_each_online_cpu(i) - kfree(cachep->array[i]); + free_percpu(cachep->cpu_cache); /* NUMA: free the node structures */ for_each_kmem_cache_node(cachep, i, n) { @@ -3719,72 +3657,45 @@ fail: return -ENOMEM; } -struct ccupdate_struct { - struct kmem_cache *cachep; - struct array_cache *new[0]; -}; - -static void do_ccupdate_local(void *info) -{ - struct ccupdate_struct *new = info; - struct array_cache *old; - - check_irq_off(); - old = cpu_cache_get(new->cachep); - - new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()]; - new->new[smp_processor_id()] = old; -} - /* Always called with the slab_mutex held */ static int __do_tune_cpucache(struct kmem_cache *cachep, int limit, int batchcount, int shared, gfp_t gfp) { - struct ccupdate_struct *new; - int i; + struct array_cache __percpu *cpu_cache, *prev; + int cpu; - new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *), - gfp); - if (!new) + cpu_cache = alloc_kmem_cache_cpus(cachep, limit, batchcount); + if (!cpu_cache) return -ENOMEM; - for_each_online_cpu(i) { - new->new[i] = alloc_arraycache(cpu_to_mem(i), limit, - batchcount, gfp); - if (!new->new[i]) { - for (i--; i >= 0; i--) - kfree(new->new[i]); - kfree(new); - return -ENOMEM; - } - } - new->cachep = cachep; - - on_each_cpu(do_ccupdate_local, (void *)new, 1); + prev = cachep->cpu_cache; + cachep->cpu_cache = cpu_cache; + kick_all_cpus_sync(); check_irq_on(); cachep->batchcount = batchcount; cachep->limit = limit; cachep->shared = shared; - for_each_online_cpu(i) { + if (!prev) + goto alloc_node; + + 
for_each_online_cpu(cpu) { LIST_HEAD(list); - struct array_cache *ccold = new->new[i]; int node; struct kmem_cache_node *n; + struct array_cache *ac = per_cpu_ptr(prev, cpu); - if (!ccold) - continue; - - node = cpu_to_mem(i); + node = cpu_to_mem(cpu); n = get_node(cachep, node); spin_lock_irq(&n->list_lock); - free_block(cachep, ccold->entry, ccold->avail, node, &list); + free_block(cachep, ac->entry, ac->avail, node, &list); spin_unlock_irq(&n->list_lock); slabs_destroy(cachep, &list); - kfree(ccold); } - kfree(new); + free_percpu(prev); + +alloc_node: return alloc_kmem_cache_node(cachep, gfp); } diff --git a/mm/slab.h b/mm/slab.h index 50d29d716db4..ab019e63e3c2 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -50,7 +50,6 @@ struct kmem_cache { enum slab_state { DOWN, /* No slab functionality yet */ PARTIAL, /* SLUB: kmem_cache_node available */ - PARTIAL_ARRAYCACHE, /* SLAB: kmalloc size for arraycache available */ PARTIAL_NODE, /* SLAB: kmalloc size for node struct available */ UP, /* Slab caches usable but not all extras yet */ FULL /* Everything is working */ -- cgit v1.2.3 From ed2f240094f900833ac06f533ab8bbcf0a1e8199 Mon Sep 17 00:00:00 2001 From: Zhang Zhen Date: Thu, 9 Oct 2014 15:26:31 -0700 Subject: memory-hotplug: add sysfs valid_zones attribute Currently memory-hotplug has two limits: 1. If the memory block is in ZONE_NORMAL, you can change it to ZONE_MOVABLE, but this memory block must be adjacent to ZONE_MOVABLE. 2. If the memory block is in ZONE_MOVABLE, you can change it to ZONE_NORMAL, but this memory block must be adjacent to ZONE_NORMAL. With this patch, we can easy to know a memory block can be onlined to which zone, and don't need to know the above two limits. Updated the related Documentation. [akpm@linux-foundation.org: use conventional comment layout] [akpm@linux-foundation.org: fix build with CONFIG_MEMORY_HOTREMOVE=n] [akpm@linux-foundation.org: remove unused local zone_prev] Signed-off-by: Zhang Zhen Cc: Dave Hansen Cc: David Rientjes Cc: Toshi Kani Cc: Yasuaki Ishimatsu Cc: Naoya Horiguchi Cc: Wang Nan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/testing/sysfs-devices-memory | 8 +++++ Documentation/memory-hotplug.txt | 11 ++++++- drivers/base/memory.c | 42 ++++++++++++++++++++++++++ include/linux/memory_hotplug.h | 1 + mm/memory_hotplug.c | 2 +- 5 files changed, 62 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/testing/sysfs-devices-memory b/Documentation/ABI/testing/sysfs-devices-memory index 7405de26ee60..deef3b5723cf 100644 --- a/Documentation/ABI/testing/sysfs-devices-memory +++ b/Documentation/ABI/testing/sysfs-devices-memory @@ -61,6 +61,14 @@ Users: hotplug memory remove tools http://www.ibm.com/developerworks/wikis/display/LinuxP/powerpc-utils +What: /sys/devices/system/memory/memoryX/valid_zones +Date: July 2014 +Contact: Zhang Zhen +Description: + The file /sys/devices/system/memory/memoryX/valid_zones is + read-only and is designed to show which zone this memory + block can be onlined to. 
+ What: /sys/devices/system/memoryX/nodeY Date: October 2009 Contact: Linux Memory Management list diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt index 45134dc23854..ea03abfc97e9 100644 --- a/Documentation/memory-hotplug.txt +++ b/Documentation/memory-hotplug.txt @@ -155,6 +155,7 @@ Under each memory block, you can see 4 files: /sys/devices/system/memory/memoryXXX/phys_device /sys/devices/system/memory/memoryXXX/state /sys/devices/system/memory/memoryXXX/removable +/sys/devices/system/memory/memoryXXX/valid_zones 'phys_index' : read-only and contains memory block id, same as XXX. 'state' : read-write @@ -170,6 +171,15 @@ Under each memory block, you can see 4 files: block is removable and a value of 0 indicates that it is not removable. A memory block is removable only if every section in the block is removable. +'valid_zones' : read-only: designed to show which zones this memory block + can be onlined to. + The first column shows it's default zone. + "memory6/valid_zones: Normal Movable" shows this memoryblock + can be onlined to ZONE_NORMAL by default and to ZONE_MOVABLE + by online_movable. + "memory7/valid_zones: Movable Normal" shows this memoryblock + can be onlined to ZONE_MOVABLE by default and to ZONE_NORMAL + by online_kernel. NOTE: These directories/files appear after physical memory hotplug phase. @@ -408,7 +418,6 @@ node if necessary. - allowing memory hot-add to ZONE_MOVABLE. maybe we need some switch like sysctl or new control file. - showing memory block and physical device relationship. - - showing memory block is under ZONE_MOVABLE or not - test and make it better memory offlining. - support HugeTLB page migration and offlining. - memmap removing at memory offline. diff --git a/drivers/base/memory.c b/drivers/base/memory.c index a2e13e250bba..7c5d87191b28 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -373,6 +373,45 @@ static ssize_t show_phys_device(struct device *dev, return sprintf(buf, "%d\n", mem->phys_device); } +#ifdef CONFIG_MEMORY_HOTREMOVE +static ssize_t show_valid_zones(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct memory_block *mem = to_memory_block(dev); + unsigned long start_pfn, end_pfn; + unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block; + struct page *first_page; + struct zone *zone; + + start_pfn = section_nr_to_pfn(mem->start_section_nr); + end_pfn = start_pfn + nr_pages; + first_page = pfn_to_page(start_pfn); + + /* The block contains more than one zone can not be offlined. 
*/ + if (!test_pages_in_a_zone(start_pfn, end_pfn)) + return sprintf(buf, "none\n"); + + zone = page_zone(first_page); + + if (zone_idx(zone) == ZONE_MOVABLE - 1) { + /*The mem block is the last memoryblock of this zone.*/ + if (end_pfn == zone_end_pfn(zone)) + return sprintf(buf, "%s %s\n", + zone->name, (zone + 1)->name); + } + + if (zone_idx(zone) == ZONE_MOVABLE) { + /*The mem block is the first memoryblock of ZONE_MOVABLE.*/ + if (start_pfn == zone->zone_start_pfn) + return sprintf(buf, "%s %s\n", + zone->name, (zone - 1)->name); + } + + return sprintf(buf, "%s\n", zone->name); +} +static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL); +#endif + static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL); static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state); static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL); @@ -523,6 +562,9 @@ static struct attribute *memory_memblk_attrs[] = { &dev_attr_state.attr, &dev_attr_phys_device.attr, &dev_attr_removable.attr, +#ifdef CONFIG_MEMORY_HOTREMOVE + &dev_attr_valid_zones.attr, +#endif NULL }; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index d9524c49d767..8f1a41951df9 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -84,6 +84,7 @@ extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages); extern int add_one_highpage(struct page *page, int pfn, int bad_ppro); /* VM interface that may be used by firmware interface */ extern int online_pages(unsigned long, unsigned long, int); +extern int test_pages_in_a_zone(unsigned long, unsigned long); extern void __offline_isolated_pages(unsigned long, unsigned long); typedef void (*online_page_callback_t)(struct page *page); diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 2ff8c2325e96..29d8693d0c61 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1307,7 +1307,7 @@ int is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages) /* * Confirm all pages in a range [start, end) is belongs to the same zone. */ -static int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) +int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn) { unsigned long pfn; struct zone *zone = NULL; -- cgit v1.2.3 From 6a33979d5bd7521497121c5ae4435d7003115a0f Mon Sep 17 00:00:00 2001 From: Mel Gorman Date: Thu, 9 Oct 2014 15:26:33 -0700 Subject: mm: remove misleading ARCH_USES_NUMA_PROT_NONE ARCH_USES_NUMA_PROT_NONE was defined for architectures that implemented _PAGE_NUMA using _PROT_NONE. This saved using an additional PTE bit and relied on the fact that PROT_NONE vmas were skipped by the NUMA hinting fault scanner. This was found to be conceptually confusing with a lot of implicit assumptions and it was asked that an alternative be found. Commit c46a7c81 "x86: define _PAGE_NUMA by reusing software bits on the PMD and PTE levels" redefined _PAGE_NUMA on x86 to be one of the swap PTE bits and shrunk the maximum possible swap size but it did not go far enough. There are no architectures that reuse _PROT_NONE as _PROT_NUMA but the relics still exist. This patch removes ARCH_USES_NUMA_PROT_NONE and removes some unnecessary duplication in powerpc vs the generic implementation by defining the types the core NUMA helpers expected to exist from x86 with their ppc64 equivalent. 
This necessitated that a PTE bit mask be created that identified the bits that distinguish present from NUMA pte entries but it is expected this will only differ between arches based on _PAGE_PROTNONE. The naming for the generic helpers was taken from x86 originally but ppc64 has types that are equivalent for the purposes of the helper so they are mapped instead of duplicating code. Signed-off-by: Mel Gorman Cc: Hugh Dickins Cc: "Kirill A. Shutemov" Cc: Rik van Riel Cc: Johannes Weiner Cc: Cyrill Gorcunov Reviewed-by: Aneesh Kumar K.V Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/pgtable.h | 57 ++++++++--------------------------- arch/powerpc/include/asm/pte-common.h | 5 +++ arch/x86/Kconfig | 1 - arch/x86/include/asm/pgtable_types.h | 14 +++++++++ include/asm-generic/pgtable.h | 27 ++++++----------- init/Kconfig | 11 ------- 6 files changed, 40 insertions(+), 75 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index d98c1ecc3266..f60d4ea8b50c 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h @@ -38,10 +38,9 @@ static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) static inline pgprot_t pte_pgprot(pte_t pte) { return __pgprot(pte_val(pte) & PAGE_PROT_BITS); } #ifdef CONFIG_NUMA_BALANCING - static inline int pte_present(pte_t pte) { - return pte_val(pte) & (_PAGE_PRESENT | _PAGE_NUMA); + return pte_val(pte) & _PAGE_NUMA_MASK; } #define pte_present_nonuma pte_present_nonuma @@ -50,37 +49,6 @@ static inline int pte_present_nonuma(pte_t pte) return pte_val(pte) & (_PAGE_PRESENT); } -#define pte_numa pte_numa -static inline int pte_numa(pte_t pte) -{ - return (pte_val(pte) & - (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA; -} - -#define pte_mknonnuma pte_mknonnuma -static inline pte_t pte_mknonnuma(pte_t pte) -{ - pte_val(pte) &= ~_PAGE_NUMA; - pte_val(pte) |= _PAGE_PRESENT | _PAGE_ACCESSED; - return pte; -} - -#define pte_mknuma pte_mknuma -static inline pte_t pte_mknuma(pte_t pte) -{ - /* - * We should not set _PAGE_NUMA on non present ptes. Also clear the - * present bit so that hash_page will return 1 and we collect this - * as numa fault. - */ - if (pte_present(pte)) { - pte_val(pte) |= _PAGE_NUMA; - pte_val(pte) &= ~_PAGE_PRESENT; - } else - VM_BUG_ON(1); - return pte; -} - #define ptep_set_numa ptep_set_numa static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep) @@ -92,12 +60,6 @@ static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr, return; } -#define pmd_numa pmd_numa -static inline int pmd_numa(pmd_t pmd) -{ - return pte_numa(pmd_pte(pmd)); -} - #define pmdp_set_numa pmdp_set_numa static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) @@ -109,16 +71,21 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, return; } -#define pmd_mknonnuma pmd_mknonnuma -static inline pmd_t pmd_mknonnuma(pmd_t pmd) +/* + * Generic NUMA pte helpers expect pteval_t and pmdval_t types to exist + * which was inherited from x86. 
For the purposes of powerpc pte_basic_t and + * pmd_t are equivalent + */ +#define pteval_t pte_basic_t +#define pmdval_t pmd_t +static inline pteval_t ptenuma_flags(pte_t pte) { - return pte_pmd(pte_mknonnuma(pmd_pte(pmd))); + return pte_val(pte) & _PAGE_NUMA_MASK; } -#define pmd_mknuma pmd_mknuma -static inline pmd_t pmd_mknuma(pmd_t pmd) +static inline pmdval_t pmdnuma_flags(pmd_t pmd) { - return pte_pmd(pte_mknuma(pmd_pte(pmd))); + return pmd_val(pmd) & _PAGE_NUMA_MASK; } # else diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index 8d1569c29042..e040c3595129 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h @@ -98,6 +98,11 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); _PAGE_USER | _PAGE_ACCESSED | \ _PAGE_RW | _PAGE_HWWRITE | _PAGE_DIRTY | _PAGE_EXEC) +#ifdef CONFIG_NUMA_BALANCING +/* Mask of bits that distinguish present and numa ptes */ +#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PRESENT) +#endif + /* * We define 2 sets of base prot bits, one for basic pages (ie, * cacheable kernel and user pages) and one for non cacheable diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e4b1f431c7ed..3eb8a41509b3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -30,7 +30,6 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select ARCH_SUPPORTS_NUMA_BALANCING if X86_64 select ARCH_SUPPORTS_INT128 if X86_64 - select ARCH_WANTS_PROT_NUMA_PROT_NONE select HAVE_IDE select HAVE_OPROFILE select HAVE_PCSPKR_PLATFORM diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index f216963760e5..0f9724c9c510 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -325,6 +325,20 @@ static inline pteval_t pte_flags(pte_t pte) return native_pte_val(pte) & PTE_FLAGS_MASK; } +#ifdef CONFIG_NUMA_BALANCING +/* Set of bits that distinguishes present, prot_none and numa ptes */ +#define _PAGE_NUMA_MASK (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT) +static inline pteval_t ptenuma_flags(pte_t pte) +{ + return pte_flags(pte) & _PAGE_NUMA_MASK; +} + +static inline pmdval_t pmdnuma_flags(pmd_t pmd) +{ + return pmd_flags(pmd) & _PAGE_NUMA_MASK; +} +#endif /* CONFIG_NUMA_BALANCING */ + #define pgprot_val(x) ((x).pgprot) #define __pgprot(x) ((pgprot_t) { (x) } ) diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 53b2acc38213..281870f56450 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h @@ -660,11 +660,12 @@ static inline int pmd_trans_unstable(pmd_t *pmd) } #ifdef CONFIG_NUMA_BALANCING -#ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE /* - * _PAGE_NUMA works identical to _PAGE_PROTNONE (it's actually the - * same bit too). It's set only when _PAGE_PRESET is not set and it's - * never set if _PAGE_PRESENT is set. + * _PAGE_NUMA distinguishes between an unmapped page table entry, an entry that + * is protected for PROT_NONE and a NUMA hinting fault entry. If the + * architecture defines __PAGE_PROTNONE then it should take that into account + * but those that do not can rely on the fact that the NUMA hinting scanner + * skips inaccessible VMAs. * * pte/pmd_present() returns true if pte/pmd_numa returns true. 
Page * fault triggers on those regions if pte/pmd_numa returns true @@ -673,16 +674,14 @@ static inline int pmd_trans_unstable(pmd_t *pmd) #ifndef pte_numa static inline int pte_numa(pte_t pte) { - return (pte_flags(pte) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return ptenuma_flags(pte) == _PAGE_NUMA; } #endif #ifndef pmd_numa static inline int pmd_numa(pmd_t pmd) { - return (pmd_flags(pmd) & - (_PAGE_NUMA|_PAGE_PROTNONE|_PAGE_PRESENT)) == _PAGE_NUMA; + return pmdnuma_flags(pmd) == _PAGE_NUMA; } #endif @@ -722,6 +721,8 @@ static inline pte_t pte_mknuma(pte_t pte) { pteval_t val = pte_val(pte); + VM_BUG_ON(!(val & _PAGE_PRESENT)); + val &= ~_PAGE_PRESENT; val |= _PAGE_NUMA; @@ -765,16 +766,6 @@ static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, } #endif #else -extern int pte_numa(pte_t pte); -extern int pmd_numa(pmd_t pmd); -extern pte_t pte_mknonnuma(pte_t pte); -extern pmd_t pmd_mknonnuma(pmd_t pmd); -extern pte_t pte_mknuma(pte_t pte); -extern pmd_t pmd_mknuma(pmd_t pmd); -extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep); -extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp); -#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */ -#else static inline int pmd_numa(pmd_t pmd) { return 0; diff --git a/init/Kconfig b/init/Kconfig index e25a82a291a6..d2355812ba48 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -889,17 +889,6 @@ config ARCH_SUPPORTS_INT128 config ARCH_WANT_NUMA_VARIABLE_LOCALITY bool -# -# For architectures that are willing to define _PAGE_NUMA as _PAGE_PROTNONE -config ARCH_WANTS_PROT_NUMA_PROT_NONE - bool - -config ARCH_USES_NUMA_PROT_NONE - bool - default y - depends on ARCH_WANTS_PROT_NUMA_PROT_NONE - depends on NUMA_BALANCING - config NUMA_BALANCING_DEFAULT_ENABLED bool "Automatically enable NUMA aware memory/task placement" default y -- cgit v1.2.3 From 505e3be6c082489a32a88e042f930d047b6415bc Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:35 -0700 Subject: lib/genalloc.c: add power aligned algorithm One of the more common algorithms used for allocation is to align the start address of the allocation to the order of size requested. Add this as an algorithm option for genalloc. 
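For illustration only, not part of the patch above: a minimal sketch of how a pool user could opt in to the new algorithm via gen_pool_set_algo(). The pool base address, the sizes and the demo_* names are made-up values for the example.

#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/sizes.h>

static struct gen_pool *demo_pool;	/* hypothetical pool for the example */

static int __init demo_pool_init(void)
{
	/* min_alloc_order = 4: the pool hands out multiples of 16 bytes */
	demo_pool = gen_pool_create(4, -1);
	if (!demo_pool)
		return -ENOMEM;

	/* made-up, suitably aligned 1 MB region owned by the caller */
	if (gen_pool_add(demo_pool, 0x40000000, SZ_1M, -1)) {
		gen_pool_destroy(demo_pool);
		return -ENOMEM;
	}

	/* align each allocation to the power-of-two order of its size */
	gen_pool_set_algo(demo_pool, gen_pool_first_fit_order_align, NULL);

	/* a 4 KB request now comes back 4 KB aligned within the pool */
	return gen_pool_alloc(demo_pool, SZ_4K) ? 0 : -ENOMEM;
}
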
Signed-off-by: Laura Abbott Acked-by: Will Deacon Acked-by: Olof Johansson Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 4 ++++ lib/genalloc.c | 20 ++++++++++++++++++++ 2 files changed, 24 insertions(+) (limited to 'include') diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 1c2fdaa2ffc3..3cd0934d62ba 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -110,6 +110,10 @@ extern void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, extern unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); +extern unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, unsigned int nr, + void *data); + extern unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size, unsigned long start, unsigned int nr, void *data); diff --git a/lib/genalloc.c b/lib/genalloc.c index 38d2db82228c..166f17b9f169 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -480,6 +480,26 @@ unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size, } EXPORT_SYMBOL(gen_pool_first_fit); +/** + * gen_pool_first_fit_order_align - find the first available region + * of memory matching the size requirement. The region will be aligned + * to the order of the size specified. + * @map: The address to base the search on + * @size: The bitmap size in bits + * @start: The bitnumber to start searching at + * @nr: The number of zeroed bits we're looking for + * @data: additional data - unused + */ +unsigned long gen_pool_first_fit_order_align(unsigned long *map, + unsigned long size, unsigned long start, + unsigned int nr, void *data) +{ + unsigned long align_mask = roundup_pow_of_two(nr) - 1; + + return bitmap_find_next_zero_area(map, size, start, nr, align_mask); +} +EXPORT_SYMBOL(gen_pool_first_fit_order_align); + /** * gen_pool_best_fit - find the best fitting region of memory * macthing the size requirement (no alignment constraint) -- cgit v1.2.3 From 9efb3a421d55d30b65fb0dbee05108d15c6c55f7 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:38 -0700 Subject: lib/genalloc.c: add genpool range check function After allocating an address from a particular genpool, there is no good way to verify if that address actually belongs to a genpool. Introduce addr_in_gen_pool which will return if an address plus size falls completely within the genpool range. 
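For illustration only, not part of the patch above: a sketch of the kind of free path the new check enables; the mixed pool/vmalloc setup and the demo_free() name are hypothetical.

#include <linux/genalloc.h>
#include <linux/vmalloc.h>

/*
 * Free a buffer that may have come either from a gen_pool (e.g. an atomic
 * pool) or from vmalloc(); addr_in_gen_pool() tells the two cases apart.
 */
static void demo_free(struct gen_pool *pool, void *vaddr, size_t size)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr_in_gen_pool(pool, addr, size))
		gen_pool_free(pool, addr, size);
	else
		vfree(vaddr);
}
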
Signed-off-by: Laura Abbott Acked-by: Will Deacon Reviewed-by: Olof Johansson Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/genalloc.h | 3 +++ lib/genalloc.c | 29 +++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) (limited to 'include') diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h index 3cd0934d62ba..1ccaab44abcc 100644 --- a/include/linux/genalloc.h +++ b/include/linux/genalloc.h @@ -121,6 +121,9 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order, int nid); extern struct gen_pool *dev_get_gen_pool(struct device *dev); +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size); + #ifdef CONFIG_OF extern struct gen_pool *of_get_named_gen_pool(struct device_node *np, const char *propname, int index); diff --git a/lib/genalloc.c b/lib/genalloc.c index 166f17b9f169..cce4dd68c40d 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -402,6 +402,35 @@ void gen_pool_for_each_chunk(struct gen_pool *pool, } EXPORT_SYMBOL(gen_pool_for_each_chunk); +/** + * addr_in_gen_pool - checks if an address falls within the range of a pool + * @pool: the generic memory pool + * @start: start address + * @size: size of the region + * + * Check if the range of addresses falls within the specified pool. Returns + * true if the entire range is contained in the pool and false otherwise. + */ +bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start, + size_t size) +{ + bool found = false; + unsigned long end = start + size; + struct gen_pool_chunk *chunk; + + rcu_read_lock(); + list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) { + if (start >= chunk->start_addr && start <= chunk->end_addr) { + if (end <= chunk->end_addr) { + found = true; + break; + } + } + } + rcu_read_unlock(); + return found; +} + /** * gen_pool_avail - get available free space of the pool * @pool: pool to get available free space -- cgit v1.2.3 From 513510ddba9650fc7da456eefeb0ead7632324f6 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 9 Oct 2014 15:26:40 -0700 Subject: common: dma-mapping: introduce common remapping functions For architectures without coherent DMA, memory for DMA may need to be remapped with coherent attributes. Factor out the the remapping code from arm and put it in a common location to reduce code duplication. As part of this, the arm APIs are now migrated away from ioremap_page_range to the common APIs which use map_vm_area for remapping. This should be an equivalent change and using map_vm_area is more correct as ioremap_page_range is intended to bring in io addresses into the cpu space and not regular kernel managed memory. 
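For illustration only, not part of the patch above: roughly how an architecture's DMA alloc/free paths might call the new helpers. The VM_USERMAP flag and the pgprot_writecombine() choice are assumptions for the sketch; arm itself passes VM_ARM_DMA_CONSISTENT | VM_USERMAP together with its own coherent pgprot.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Map size bytes of already-allocated pages with a non-cacheable pgprot. */
static void *demo_dma_remap(struct page *page, size_t size)
{
	return dma_common_contiguous_remap(page, size, VM_USERMAP,
					   pgprot_writecombine(PAGE_KERNEL),
					   __builtin_return_address(0));
}

static void demo_dma_unremap(void *cpu_addr, size_t size)
{
	dma_common_free_remap(cpu_addr, size, VM_USERMAP);
}
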
Signed-off-by: Laura Abbott Reviewed-by: Catalin Marinas Cc: Arnd Bergmann Cc: David Riley Cc: Olof Johansson Cc: Ritesh Harjain Cc: Russell King Cc: Thierry Reding Cc: Will Deacon Cc: James Hogan Cc: Laura Abbott Cc: Mitchel Humpherys Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/mm/dma-mapping.c | 57 ++++--------------------- drivers/base/dma-mapping.c | 72 ++++++++++++++++++++++++++++++++ include/asm-generic/dma-mapping-common.h | 9 ++++ 3 files changed, 90 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 7a996aaa061e..eecc8e60deea 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -298,37 +298,19 @@ static void * __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { - struct vm_struct *area; - unsigned long addr; - /* * DMA allocation can be mapped to user space, so lets * set VM_USERMAP flags too. */ - area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, - caller); - if (!area) - return NULL; - addr = (unsigned long)area->addr; - area->phys_addr = __pfn_to_phys(page_to_pfn(page)); - - if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) { - vunmap((void *)addr); - return NULL; - } - return (void *)addr; + return dma_common_contiguous_remap(page, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP, + prot, caller); } static void __dma_free_remap(void *cpu_addr, size_t size) { - unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP; - struct vm_struct *area = find_vm_area(cpu_addr); - if (!area || (area->flags & flags) != flags) { - WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); - return; - } - unmap_kernel_range((unsigned long)cpu_addr, size); - vunmap(cpu_addr); + dma_common_free_remap(cpu_addr, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP); } #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K @@ -1271,29 +1253,8 @@ static void * __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot, const void *caller) { - unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; - struct vm_struct *area; - unsigned long p; - - area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP, - caller); - if (!area) - return NULL; - - area->pages = pages; - area->nr_pages = nr_pages; - p = (unsigned long)area->addr; - - for (i = 0; i < nr_pages; i++) { - phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i])); - if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot)) - goto err; - p += PAGE_SIZE; - } - return area->addr; -err: - unmap_kernel_range((unsigned long)area->addr, size); - vunmap(area->addr); + return dma_common_pages_remap(pages, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); return NULL; } @@ -1501,8 +1462,8 @@ void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr, } if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) { - unmap_kernel_range((unsigned long)cpu_addr, size); - vunmap(cpu_addr); + dma_common_free_remap(cpu_addr, size, + VM_ARM_DMA_CONSISTENT | VM_USERMAP); } __iommu_remove_mapping(dev, handle, size); diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 6cd08e145bfa..9e8bbdd470ca 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -10,6 +10,8 @@ #include #include #include +#include +#include #include /* @@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, return ret; } EXPORT_SYMBOL(dma_common_mmap); + +#ifdef CONFIG_MMU +/* + * remaps 
an array of PAGE_SIZE pages into another vm_area + * Cannot be used in non-sleeping contexts + */ +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller) +{ + struct vm_struct *area; + + area = get_vm_area_caller(size, vm_flags, caller); + if (!area) + return NULL; + + area->pages = pages; + + if (map_vm_area(area, prot, pages)) { + vunmap(area->addr); + return NULL; + } + + return area->addr; +} + +/* + * remaps an allocated contiguous region into another vm_area. + * Cannot be used in non-sleeping contexts + */ + +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller) +{ + int i; + struct page **pages; + void *ptr; + unsigned long pfn; + + pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL); + if (!pages) + return NULL; + + for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++) + pages[i] = pfn_to_page(pfn + i); + + ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller); + + kfree(pages); + + return ptr; +} + +/* + * unmaps a range previously mapped by dma_common_*_remap + */ +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags) +{ + struct vm_struct *area = find_vm_area(cpu_addr); + + if (!area || (area->flags & vm_flags) != vm_flags) { + WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr); + return; + } + + unmap_kernel_range((unsigned long)cpu_addr, size); + vunmap(cpu_addr); +} +#endif diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h index de8bf89940f8..a9fd248f5d48 100644 --- a/include/asm-generic/dma-mapping-common.h +++ b/include/asm-generic/dma-mapping-common.h @@ -179,6 +179,15 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size); +void *dma_common_contiguous_remap(struct page *page, size_t size, + unsigned long vm_flags, + pgprot_t prot, const void *caller); + +void *dma_common_pages_remap(struct page **pages, size_t size, + unsigned long vm_flags, pgprot_t prot, + const void *caller); +void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags); + /** * dma_mmap_attrs - map a coherent DMA allocation into user space * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices -- cgit v1.2.3 From 53853e2d2bfb748a8b5aa2fd1de15699266865e0 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:02 -0700 Subject: mm, compaction: defer each zone individually instead of preferred zone When direct sync compaction is often unsuccessful, it may become deferred for some time to avoid further useless attempts, both sync and async. Successful high-order allocations un-defer compaction, while further unsuccessful compaction attempts prolong the compaction deferred period. Currently the checking and setting deferred status is performed only on the preferred zone of the allocation that invoked direct compaction. But compaction itself is attempted on all eligible zones in the zonelist, so the behavior is suboptimal and may lead both to scenarios where 1) compaction is attempted uselessly, or 2) where it's not attempted despite good chances of succeeding, as shown on the examples below: 1) A direct compaction with Normal preferred zone failed and set deferred compaction for the Normal zone. 
Another unrelated direct compaction with DMA32 as preferred zone will attempt to compact DMA32 zone even though the first compaction attempt also included DMA32 zone. In another scenario, compaction with Normal preferred zone failed to compact Normal zone, but succeeded in the DMA32 zone, so it will not defer compaction. In the next attempt, it will try Normal zone which will fail again, instead of skipping Normal zone and trying DMA32 directly. 2) Kswapd will balance DMA32 zone and reset defer status based on watermarks looking good. A direct compaction with preferred Normal zone will skip compaction of all zones including DMA32 because Normal was still deferred. The allocation might have succeeded in DMA32, but won't. This patch makes compaction deferring work on individual zone basis instead of preferred zone. For each zone, it checks compaction_deferred() to decide if the zone should be skipped. If watermarks fail after compacting the zone, defer_compaction() is called. The zone where watermarks passed can still be deferred when the allocation attempt is unsuccessful. When allocation is successful, compaction_defer_reset() is called for the zone containing the allocated page. This approach should approximate calling defer_compaction() only on zones where compaction was attempted and did not yield allocated page. There might be corner cases but that is inevitable as long as the decision to stop compacting dues not guarantee that a page will be allocated. Due to a new COMPACT_DEFERRED return value, some functions relying implicitly on COMPACT_SKIPPED = 0 had to be updated, with comments made more accurate. The did_some_progress output parameter of __alloc_pages_direct_compact() is removed completely, as the caller actually does not use it after compaction sets it - it is only considered when direct reclaim sets it. During testing on a two-node machine with a single very small Normal zone on node 1, this patch has improved success rates in stress-highalloc mmtests benchmark. The success here were previously made worse by commit 3a025760fc15 ("mm: page_alloc: spill to remote nodes before waking kswapd") as kswapd was no longer resetting often enough the deferred compaction for the Normal zone, and DMA32 zones on both nodes were thus not considered for compaction. On different machine, success rates were improved with __GFP_NO_KSWAPD allocations. 
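For illustration only, a minimal standalone sketch of the per-zone deferral bookkeeping described above. The toy_* types and helpers are hypothetical stand-ins for the kernel's struct zone, compaction_deferred(), defer_compaction() and compaction_defer_reset(); the main() loop just replays the Normal/DMA32 scenario from example 1 above, with deferral tracked per zone rather than on the preferred zone:

	/* Toy model, not kernel code: per-zone compaction deferral. */
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_zone {
		const char *name;
		unsigned int defer_shift;      /* back-off exponent          */
		unsigned int defer_considered; /* attempts seen this window  */
		int defer_order;               /* deferral applies >= order  */
	};

	/* Skip this zone if it is still inside its deferred window. */
	static bool toy_compaction_deferred(struct toy_zone *z, int order)
	{
		if (order < z->defer_order)
			return false;
		if (++z->defer_considered >= 1UL << z->defer_shift) {
			z->defer_considered = 0;
			return false;           /* window expired, try again */
		}
		return true;
	}

	/* Record another failure: lengthen this zone's deferred window. */
	static void toy_defer_compaction(struct toy_zone *z, int order)
	{
		z->defer_order = order;
		z->defer_considered = 0;
		if (z->defer_shift < 6)
			z->defer_shift++;
	}

	/* A successful allocation resets the zone's deferral state. */
	static void toy_compaction_defer_reset(struct toy_zone *z, int order)
	{
		if (order >= z->defer_order) {
			z->defer_shift = 0;
			z->defer_considered = 0;
		}
	}

	int main(void)
	{
		struct toy_zone zones[] = { { "Normal" }, { "DMA32" } };
		int order = 9;

		/* Pretend Normal always fails while DMA32 always succeeds. */
		for (int attempt = 0; attempt < 3; attempt++) {
			for (unsigned int i = 0; i < 2; i++) {
				struct toy_zone *z = &zones[i];

				if (toy_compaction_deferred(z, order)) {
					printf("attempt %d: %s skipped (deferred)\n",
					       attempt, z->name);
					continue;
				}
				if (i == 0) {
					toy_defer_compaction(z, order);
					printf("attempt %d: %s failed, deferred\n",
					       attempt, z->name);
				} else {
					toy_compaction_defer_reset(z, order);
					printf("attempt %d: %s succeeded\n",
					       attempt, z->name);
				}
			}
		}
		return 0;
	}

The back-off in the sketch is exponential (defer_shift widens the skip window after each failure), so a zone that keeps failing is consulted progressively less often, while a successful allocation resets that zone immediately; the failing Normal zone no longer blocks or pollutes the state of DMA32.
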
[akpm@linux-foundation.org: fix CONFIG_COMPACTION=n build] Signed-off-by: Vlastimil Babka Acked-by: Minchan Kim Reviewed-by: Zhang Yanfei Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 16 ++++++++----- mm/compaction.c | 32 ++++++++++++++++++++------ mm/page_alloc.c | 57 +++++++++++++++++++++++++--------------------- mm/vmscan.c | 14 ++++++++---- 4 files changed, 76 insertions(+), 43 deletions(-) (limited to 'include') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 01e3132820da..b2e4c92d0445 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -2,14 +2,16 @@ #define _LINUX_COMPACTION_H /* Return values for compact_zone() and try_to_compact_pages() */ +/* compaction didn't start as it was deferred due to past failures */ +#define COMPACT_DEFERRED 0 /* compaction didn't start as it was not possible or direct reclaim was more suitable */ -#define COMPACT_SKIPPED 0 +#define COMPACT_SKIPPED 1 /* compaction should continue to another pageblock */ -#define COMPACT_CONTINUE 1 +#define COMPACT_CONTINUE 2 /* direct compaction partially compacted a zone and there are suitable pages */ -#define COMPACT_PARTIAL 2 +#define COMPACT_PARTIAL 3 /* The full zone was compacted */ -#define COMPACT_COMPLETE 3 +#define COMPACT_COMPLETE 4 #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; @@ -22,7 +24,8 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended); + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order); @@ -91,7 +94,8 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone) { return COMPACT_CONTINUE; } diff --git a/mm/compaction.c b/mm/compaction.c index 21bf292b642a..1c7195d42e83 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1125,27 +1125,26 @@ int sysctl_extfrag_threshold = 500; * @nodemask: The allowed nodes to allocate from * @mode: The migration mode for async, sync light, or sync migration * @contended: Return value that is true if compaction was aborted due to lock contention - * @page: Optionally capture a free page of the requested order during compaction + * @candidate_zone: Return the zone where we think allocation should succeed * * This is the main entry point for direct page compaction. 
*/ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended) + enum migrate_mode mode, bool *contended, + struct zone **candidate_zone) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; int may_perform_io = gfp_mask & __GFP_IO; struct zoneref *z; struct zone *zone; - int rc = COMPACT_SKIPPED; + int rc = COMPACT_DEFERRED; int alloc_flags = 0; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) - return rc; - - count_compact_event(COMPACTSTALL); + return COMPACT_SKIPPED; #ifdef CONFIG_CMA if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) @@ -1156,14 +1155,33 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, nodemask) { int status; + if (compaction_deferred(zone, order)) + continue; + status = compact_zone_order(zone, order, gfp_mask, mode, contended); rc = max(status, rc); /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, - alloc_flags)) + alloc_flags)) { + *candidate_zone = zone; + /* + * We think the allocation will succeed in this zone, + * but it is not certain, hence the false. The caller + * will repeat this with true if allocation indeed + * succeeds in this zone. + */ + compaction_defer_reset(zone, order, false); break; + } else if (mode != MIGRATE_ASYNC) { + /* + * We think that allocation won't succeed in this zone + * so we defer compaction there. If it ends up + * succeeding after all, it will be reset. + */ + defer_compaction(zone, order); + } } return rc; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e63bf7744a0c..514fd8008114 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2297,24 +2297,28 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction, - unsigned long *did_some_progress) + bool *contended_compaction, bool *deferred_compaction) { - if (!order) - return NULL; + struct zone *last_compact_zone = NULL; + unsigned long compact_result; - if (compaction_deferred(preferred_zone, order)) { - *deferred_compaction = true; + + if (!order) return NULL; - } current->flags |= PF_MEMALLOC; - *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, + compact_result = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, mode, - contended_compaction); + contended_compaction, + &last_compact_zone); current->flags &= ~PF_MEMALLOC; - if (*did_some_progress != COMPACT_SKIPPED) { + if (compact_result > COMPACT_DEFERRED) + count_vm_event(COMPACTSTALL); + else + *deferred_compaction = true; + + if (compact_result > COMPACT_SKIPPED) { struct page *page; /* Page migration frees to the PCP lists but we want merging */ @@ -2325,13 +2329,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, order, zonelist, high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, preferred_zone, classzone_idx, migratetype); + if (page) { - preferred_zone->compact_blockskip_flush = false; - compaction_defer_reset(preferred_zone, order, true); + struct zone *zone = page_zone(page); + + zone->compact_blockskip_flush = false; + compaction_defer_reset(zone, order, true); count_vm_event(COMPACTSUCCESS); return page; } + /* + * last_compact_zone is where 
try_to_compact_pages thought + * allocation should succeed, so it did not defer compaction. + * But now we know that it didn't succeed, so we do the defer. + */ + if (last_compact_zone && mode != MIGRATE_ASYNC) + defer_compaction(last_compact_zone, order); + /* * It's bad if compaction run occurs and fails. * The most likely reason is that pages exist, @@ -2339,13 +2354,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, */ count_vm_event(COMPACTFAIL); - /* - * As async compaction considers a subset of pageblocks, only - * defer if the failure was a sync compaction failure. - */ - if (mode != MIGRATE_ASYNC) - defer_compaction(preferred_zone, order); - cond_resched(); } @@ -2356,9 +2364,8 @@ static inline struct page * __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, - int classzone_idx, int migratetype, - enum migrate_mode mode, bool *contended_compaction, - bool *deferred_compaction, unsigned long *did_some_progress) + int classzone_idx, int migratetype, enum migrate_mode mode, + bool *contended_compaction, bool *deferred_compaction) { return NULL; } @@ -2634,8 +2641,7 @@ rebalance: preferred_zone, classzone_idx, migratetype, migration_mode, &contended_compaction, - &deferred_compaction, - &did_some_progress); + &deferred_compaction); if (page) goto got_pg; @@ -2727,8 +2733,7 @@ rebalance: preferred_zone, classzone_idx, migratetype, migration_mode, &contended_compaction, - &deferred_compaction, - &did_some_progress); + &deferred_compaction); if (page) goto got_pg; } diff --git a/mm/vmscan.c b/mm/vmscan.c index 2836b5373b2e..1a71b8b1ea34 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2315,7 +2315,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc) return reclaimable; } -/* Returns true if compaction should go ahead for a high-order request */ +/* + * Returns true if compaction should go ahead for a high-order request, or + * the high-order allocation would succeed without compaction. + */ static inline bool compaction_ready(struct zone *zone, int order) { unsigned long balance_gap, watermark; @@ -2339,8 +2342,11 @@ static inline bool compaction_ready(struct zone *zone, int order) if (compaction_deferred(zone, order)) return watermark_ok; - /* If compaction is not ready to start, keep reclaiming */ - if (!compaction_suitable(zone, order)) + /* + * If compaction is not ready to start and allocation is not likely + * to succeed without it, then keep reclaiming. + */ + if (compaction_suitable(zone, order) == COMPACT_SKIPPED) return false; return watermark_ok; @@ -2818,7 +2824,7 @@ static bool zone_balanced(struct zone *zone, int order, return false; if (IS_ENABLED(CONFIG_COMPACTION) && order && - !compaction_suitable(zone, order)) + compaction_suitable(zone, order) == COMPACT_SKIPPED) return false; return true; -- cgit v1.2.3 From 1f9efdef4f3f1d2a073e524113fd0038af636f2b Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Thu, 9 Oct 2014 15:27:14 -0700 Subject: mm, compaction: khugepaged should not give up due to need_resched() Async compaction aborts when it detects zone lock contention or need_resched() is true. David Rientjes has reported that in practice, most direct async compactions for THP allocation abort due to need_resched(). 
This means that a second direct compaction is never attempted, which might be OK for a page fault, but khugepaged is intended to attempt a sync compaction in such case and in these cases it won't. This patch replaces "bool contended" in compact_control with an int that distinguishes between aborting due to need_resched() and aborting due to lock contention. This allows propagating the abort through all compaction functions as before, but passing the abort reason up to __alloc_pages_slowpath() which decides when to continue with direct reclaim and another compaction attempt. Another problem is that try_to_compact_pages() did not act upon the reported contention (both need_resched() or lock contention) immediately and would proceed with another zone from the zonelist. When need_resched() is true, that means initializing another zone compaction, only to check again need_resched() in isolate_migratepages() and aborting. For zone lock contention, the unintended consequence is that the lock contended status reported back to the allocator is detrmined from the last zone where compaction was attempted, which is rather arbitrary. This patch fixes the problem in the following way: - async compaction of a zone aborting due to need_resched() or fatal signal pending means that further zones should not be tried. We report COMPACT_CONTENDED_SCHED to the allocator. - aborting zone compaction due to lock contention means we can still try another zone, since it has different set of locks. We report back COMPACT_CONTENDED_LOCK only if *all* zones where compaction was attempted, it was aborted due to lock contention. As a result of these fixes, khugepaged will proceed with second sync compaction as intended, when the preceding async compaction aborted due to need_resched(). Page fault compactions aborting due to need_resched() will spare some cycles previously wasted by initializing another zone compaction only to abort again. Lock contention will be reported only when compaction in all zones aborted due to lock contention, and therefore it's not a good idea to try again after reclaim. In stress-highalloc from mmtests configured to use __GFP_NO_KSWAPD, this has improved number of THP collapse allocations by 10%, which shows positive effect on khugepaged. The benchmark's success rates are unchanged as it is not recognized as khugepaged. Numbers of compact_stall and compact_fail events have however decreased by 20%, with compact_success still a bit improved, which is good. With benchmark configured not to use __GFP_NO_KSWAPD, there is 6% improvement in THP collapse allocations, and only slight improvement in stalls and failures. 
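For illustration only, a hypothetical decision table showing how the allocator slowpath described above reacts to the new contention signals for THP-style allocations. The toy names are stand-ins for COMPACT_CONTENDED_SCHED, COMPACT_CONTENDED_LOCK and the PF_KTHREAD test in the actual __alloc_pages_slowpath() hunk further below:

	/* Toy decision table, not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	enum toy_contended {
		TOY_CONTENDED_NONE,
		TOY_CONTENDED_SCHED,    /* aborted on need_resched()/signal */
		TOY_CONTENDED_LOCK,     /* all tried zones were lock contended */
	};

	/* Return true if a THP allocation should give up instead of reclaiming. */
	static bool toy_thp_should_fail(bool deferred, enum toy_contended contended,
					bool is_kthread)
	{
		if (deferred)
			return true;    /* sync compaction recently failed      */
		if (contended == TOY_CONTENDED_LOCK)
			return true;    /* don't disrupt others, use base pages */
		if (contended == TOY_CONTENDED_SCHED && !is_kthread)
			return true;    /* don't add latency to page faults     */
		return false;           /* khugepaged may keep trying           */
	}

	int main(void)
	{
		printf("page fault, need_resched abort -> fail=%d\n",
		       toy_thp_should_fail(false, TOY_CONTENDED_SCHED, false));
		printf("khugepaged, need_resched abort -> fail=%d\n",
		       toy_thp_should_fail(false, TOY_CONTENDED_SCHED, true));
		printf("lock contention in all zones   -> fail=%d\n",
		       toy_thp_should_fail(false, TOY_CONTENDED_LOCK, true));
		return 0;
	}

In this scheme only a kernel thread (khugepaged, in this path) keeps going after a need_resched() abort; page-fault THP allocations give up and fall back to base pages rather than add latency, which is exactly the behavioural split the patch below implements.
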
[akpm@linux-foundation.org: fix warnings] Reported-by: David Rientjes Signed-off-by: Vlastimil Babka Cc: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 12 +++++-- mm/compaction.c | 87 ++++++++++++++++++++++++++++++++++++++++------ mm/internal.h | 4 +-- mm/page_alloc.c | 45 +++++++++++++++++------- 4 files changed, 121 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index b2e4c92d0445..60bdf8dc02a3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -13,6 +13,14 @@ /* The full zone was compacted */ #define COMPACT_COMPLETE 4 +/* Used to signal whether compaction detected need_sched() or lock contention */ +/* No contention detected */ +#define COMPACT_CONTENDED_NONE 0 +/* Either need_sched() was true or fatal signal pending */ +#define COMPACT_CONTENDED_SCHED 1 +/* Zone lock or lru_lock was contended in async compaction */ +#define COMPACT_CONTENDED_LOCK 2 + #ifdef CONFIG_COMPACTION extern int sysctl_compact_memory; extern int sysctl_compaction_handler(struct ctl_table *table, int write, @@ -24,7 +32,7 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); @@ -94,7 +102,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) #else static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone) { return COMPACT_CONTINUE; diff --git a/mm/compaction.c b/mm/compaction.c index 1067c07cb33d..26bb20ef853d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -223,9 +223,21 @@ static void update_pageblock_skip(struct compact_control *cc, } #endif /* CONFIG_COMPACTION */ -static inline bool should_release_lock(spinlock_t *lock) +static int should_release_lock(spinlock_t *lock) { - return need_resched() || spin_is_contended(lock); + /* + * Sched contention has higher priority here as we may potentially + * have to abort whole compaction ASAP. Returning with lock contention + * means we will try another zone, and further decisions are + * influenced only when all zones are lock contended. That means + * potentially missing a lock contention is less critical. 
+ */ + if (need_resched()) + return COMPACT_CONTENDED_SCHED; + else if (spin_is_contended(lock)) + return COMPACT_CONTENDED_LOCK; + + return COMPACT_CONTENDED_NONE; } /* @@ -240,7 +252,9 @@ static inline bool should_release_lock(spinlock_t *lock) static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, bool locked, struct compact_control *cc) { - if (should_release_lock(lock)) { + int contended = should_release_lock(lock); + + if (contended) { if (locked) { spin_unlock_irqrestore(lock, *flags); locked = false; @@ -248,7 +262,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, /* async aborts if taking too long or contended */ if (cc->mode == MIGRATE_ASYNC) { - cc->contended = true; + cc->contended = contended; return false; } @@ -274,7 +288,7 @@ static inline bool compact_should_abort(struct compact_control *cc) /* async compaction aborts if contended */ if (need_resched()) { if (cc->mode == MIGRATE_ASYNC) { - cc->contended = true; + cc->contended = COMPACT_CONTENDED_SCHED; return true; } @@ -1140,7 +1154,7 @@ out: } static unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, enum migrate_mode mode, bool *contended) + gfp_t gfp_mask, enum migrate_mode mode, int *contended) { unsigned long ret; struct compact_control cc = { @@ -1172,14 +1186,15 @@ int sysctl_extfrag_threshold = 500; * @gfp_mask: The GFP mask of the current allocation * @nodemask: The allowed nodes to allocate from * @mode: The migration mode for async, sync light, or sync migration - * @contended: Return value that is true if compaction was aborted due to lock contention + * @contended: Return value that determines if compaction was aborted due to + * need_resched() or lock contention * @candidate_zone: Return the zone where we think allocation should succeed * * This is the main entry point for direct page compaction. */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, - enum migrate_mode mode, bool *contended, + enum migrate_mode mode, int *contended, struct zone **candidate_zone) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); @@ -1189,6 +1204,9 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zone *zone; int rc = COMPACT_DEFERRED; int alloc_flags = 0; + int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */ + + *contended = COMPACT_CONTENDED_NONE; /* Check if the GFP flags allow compaction */ if (!order || !may_enter_fs || !may_perform_io) @@ -1202,13 +1220,19 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { int status; + int zone_contended; if (compaction_deferred(zone, order)) continue; status = compact_zone_order(zone, order, gfp_mask, mode, - contended); + &zone_contended); rc = max(status, rc); + /* + * It takes at least one zone that wasn't lock contended + * to clear all_zones_contended. + */ + all_zones_contended &= zone_contended; /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, @@ -1221,8 +1245,21 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, * succeeds in this zone. */ compaction_defer_reset(zone, order, false); - break; - } else if (mode != MIGRATE_ASYNC) { + /* + * It is possible that async compaction aborted due to + * need_resched() and the watermarks were ok thanks to + * somebody else freeing memory. 
The allocation can + * however still fail so we better signal the + * need_resched() contention anyway (this will not + * prevent the allocation attempt). + */ + if (zone_contended == COMPACT_CONTENDED_SCHED) + *contended = COMPACT_CONTENDED_SCHED; + + goto break_loop; + } + + if (mode != MIGRATE_ASYNC) { /* * We think that allocation won't succeed in this zone * so we defer compaction there. If it ends up @@ -1230,8 +1267,36 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, */ defer_compaction(zone, order); } + + /* + * We might have stopped compacting due to need_resched() in + * async compaction, or due to a fatal signal detected. In that + * case do not try further zones and signal need_resched() + * contention. + */ + if ((zone_contended == COMPACT_CONTENDED_SCHED) + || fatal_signal_pending(current)) { + *contended = COMPACT_CONTENDED_SCHED; + goto break_loop; + } + + continue; +break_loop: + /* + * We might not have tried all the zones, so be conservative + * and assume they are not all lock contended. + */ + all_zones_contended = 0; + break; } + /* + * If at least one zone wasn't deferred or skipped, we report if all + * zones that were tried were lock contended. + */ + if (rc > COMPACT_SKIPPED && all_zones_contended) + *contended = COMPACT_CONTENDED_LOCK; + return rc; } diff --git a/mm/internal.h b/mm/internal.h index 5a0738fa649c..4c1d604c396c 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -144,8 +144,8 @@ struct compact_control { int order; /* order a direct compactor needs */ int migratetype; /* MOVABLE, RECLAIMABLE etc */ struct zone *zone; - bool contended; /* True if a lock was contended, or - * need_resched() true during async + int contended; /* Signal need_sched() or lock + * contention detected during * compaction */ }; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index dfbf54b51649..313338d74095 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2297,7 +2297,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction) + int *contended_compaction, bool *deferred_compaction) { struct zone *last_compact_zone = NULL; unsigned long compact_result; @@ -2371,7 +2371,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist, enum zone_type high_zoneidx, nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, int classzone_idx, int migratetype, enum migrate_mode mode, - bool *contended_compaction, bool *deferred_compaction) + int *contended_compaction, bool *deferred_compaction) { return NULL; } @@ -2547,7 +2547,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order, unsigned long did_some_progress; enum migrate_mode migration_mode = MIGRATE_ASYNC; bool deferred_compaction = false; - bool contended_compaction = false; + int contended_compaction = COMPACT_CONTENDED_NONE; /* * In the slowpath, we sanity check order to avoid ever trying to @@ -2651,15 +2651,36 @@ rebalance: if (page) goto got_pg; - /* - * If compaction is deferred for high-order allocations, it is because - * sync compaction recently failed. In this is the case and the caller - * requested a movable allocation that does not heavily disrupt the - * system then fail the allocation instead of entering direct reclaim. 
- */ - if ((deferred_compaction || contended_compaction) && - (gfp_mask & __GFP_NO_KSWAPD)) - goto nopage; + /* Checks for THP-specific high-order allocations */ + if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) { + /* + * If compaction is deferred for high-order allocations, it is + * because sync compaction recently failed. If this is the case + * and the caller requested a THP allocation, we do not want + * to heavily disrupt the system, so we fail the allocation + * instead of entering direct reclaim. + */ + if (deferred_compaction) + goto nopage; + + /* + * In all zones where compaction was attempted (and not + * deferred or skipped), lock contention has been detected. + * For THP allocation we do not want to disrupt the others + * so we fallback to base pages instead. + */ + if (contended_compaction == COMPACT_CONTENDED_LOCK) + goto nopage; + + /* + * If compaction was aborted due to need_resched(), we do not + * want to further increase allocation latency, unless it is + * khugepaged trying to collapse. + */ + if (contended_compaction == COMPACT_CONTENDED_SCHED + && !(current->flags & PF_KTHREAD)) + goto nopage; + } /* * It can become very expensive to allocate transparent hugepages at -- cgit v1.2.3 From 43e7a34d265e884b7cf34f9b05e6f2e0c05bf120 Mon Sep 17 00:00:00 2001 From: David Rientjes Date: Thu, 9 Oct 2014 15:27:25 -0700 Subject: mm: rename allocflags_to_migratetype for clarity The page allocator has gfp flags (like __GFP_WAIT) and alloc flags (like ALLOC_CPUSET) that have separate semantics. The function allocflags_to_migratetype() actually takes gfp flags, not alloc flags, and returns a migratetype. Rename it to gfpflags_to_migratetype(). Signed-off-by: David Rientjes Signed-off-by: Vlastimil Babka Reviewed-by: Zhang Yanfei Reviewed-by: Naoya Horiguchi Acked-by: Minchan Kim Acked-by: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Christoph Lameter Cc: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 2 +- mm/compaction.c | 4 ++-- mm/page_alloc.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5e7219dc0fae..41b30fd4d041 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -156,7 +156,7 @@ struct vm_area_struct; #define GFP_DMA32 __GFP_DMA32 /* Convert GFP flags to their corresponding migrate type */ -static inline int allocflags_to_migratetype(gfp_t gfp_flags) +static inline int gfpflags_to_migratetype(const gfp_t gfp_flags) { WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK); diff --git a/mm/compaction.c b/mm/compaction.c index b9cf751cc00e..7c687c0eef6e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1242,7 +1242,7 @@ static unsigned long compact_zone_order(struct zone *zone, int order, .nr_freepages = 0, .nr_migratepages = 0, .order = order, - .migratetype = allocflags_to_migratetype(gfp_mask), + .migratetype = gfpflags_to_migratetype(gfp_mask), .zone = zone, .mode = mode, }; @@ -1294,7 +1294,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, return COMPACT_SKIPPED; #ifdef CONFIG_CMA - if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif /* Compact each zone in the list */ diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 313338d74095..f07588b11d59 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2523,7 +2523,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask) alloc_flags |= 
ALLOC_NO_WATERMARKS; } #ifdef CONFIG_CMA - if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) + if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) alloc_flags |= ALLOC_CMA; #endif return alloc_flags; @@ -2786,7 +2786,7 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, struct zone *preferred_zone; struct zoneref *preferred_zoneref; struct page *page = NULL; - int migratetype = allocflags_to_migratetype(gfp_mask); + int migratetype = gfpflags_to_migratetype(gfp_mask); unsigned int cpuset_mems_cookie; int alloc_flags = ALLOC_WMARK_LOW|ALLOC_CPUSET|ALLOC_FAIR; int classzone_idx; -- cgit v1.2.3 From 9c5990240e076ae564cccbd921868cd08f6daaa5 Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:29 -0700 Subject: mm: introduce check_data_rlimit helper To eliminate code duplication lets introduce check_data_rlimit helper which we will use in brk() and prctl() syscalls. Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Cc: Andrew Vagin Cc: Eric W. Biederman Cc: H. Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 15 +++++++++++++++ 1 file changed, 15 insertions(+) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index 28df70774b81..4d814aa97785 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -18,6 +18,7 @@ #include #include #include +#include struct mempolicy; struct anon_vma; @@ -1780,6 +1781,20 @@ extern struct vm_area_struct *copy_vma(struct vm_area_struct **, bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); +static inline int check_data_rlimit(unsigned long rlim, + unsigned long new, + unsigned long start, + unsigned long end_data, + unsigned long start_data) +{ + if (rlim < RLIM_INFINITY) { + if (((new - start) + (end_data - start_data)) > rlim) + return -ENOSPC; + } + + return 0; +} + extern int mm_take_all_locks(struct mm_struct *mm); extern void mm_drop_all_locks(struct mm_struct *mm); -- cgit v1.2.3 From f606b77f1a9e362451aca8f81d8f36a3a112139e Mon Sep 17 00:00:00 2001 From: Cyrill Gorcunov Date: Thu, 9 Oct 2014 15:27:37 -0700 Subject: prctl: PR_SET_MM -- introduce PR_SET_MM_MAP operation During development of c/r we've noticed that in case if we need to support user namespaces we face a problem with capabilities in prctl(PR_SET_MM, ...) call, in particular once new user namespace is created capable(CAP_SYS_RESOURCE) no longer passes. A approach is to eliminate CAP_SYS_RESOURCE check but pass all new values in one bundle, which would allow the kernel to make more intensive test for sanity of values and same time allow us to support checkpoint/restore of user namespaces. Thus a new command PR_SET_MM_MAP introduced. It takes a pointer of prctl_mm_map structure which carries all the members to be updated. prctl(PR_SET_MM, PR_SET_MM_MAP, struct prctl_mm_map *, size) struct prctl_mm_map { __u64 start_code; __u64 end_code; __u64 start_data; __u64 end_data; __u64 start_brk; __u64 brk; __u64 start_stack; __u64 arg_start; __u64 arg_end; __u64 env_start; __u64 env_end; __u64 *auxv; __u32 auxv_size; __u32 exe_fd; }; All members except @exe_fd correspond ones of struct mm_struct. To figure out which available values these members may take here are meanings of the members. 
- start_code, end_code: represent bounds of executable code area - start_data, end_data: represent bounds of data area - start_brk, brk: used to calculate bounds for brk() syscall - start_stack: used when accounting space needed for command line arguments, environment and shmat() syscall - arg_start, arg_end, env_start, env_end: represent memory area supplied for command line arguments and environment variables - auxv, auxv_size: carries auxiliary vector, Elf format specifics - exe_fd: file descriptor number for executable link (/proc/self/exe) Thus we apply the following requirements to the values 1) Any member except @auxv, @auxv_size, @exe_fd is rather an address in user space thus it must be laying inside [mmap_min_addr, mmap_max_addr) interval. 2) While @[start|end]_code and @[start|end]_data may point to an nonexisting VMAs (say a program maps own new .text and .data segments during execution) the rest of members should belong to VMA which must exist. 3) Addresses must be ordered, ie @start_ member must not be greater or equal to appropriate @end_ member. 4) As in regular Elf loading procedure we require that @start_brk and @brk be greater than @end_data. 5) If RLIMIT_DATA rlimit is set to non-infinity new values should not exceed existing limit. Same applies to RLIMIT_STACK. 6) Auxiliary vector size must not exceed existing one (which is predefined as AT_VECTOR_SIZE and depends on architecture). 7) File descriptor passed in @exe_file should be pointing to executable file (because we use existing prctl_set_mm_exe_file_locked helper it ensures that the file we are going to use as exe link has all required permission granted). Now about where these members are involved inside kernel code: - @start_code and @end_code are used in /proc/$pid/[stat|statm] output; - @start_data and @end_data are used in /proc/$pid/[stat|statm] output, also they are considered if there enough space for brk() syscall result if RLIMIT_DATA is set; - @start_brk shown in /proc/$pid/stat output and accounted in brk() syscall if RLIMIT_DATA is set; also this member is tested to find a symbolic name of mmap event for perf system (we choose if event is generated for "heap" area); one more aplication is selinux -- we test if a process has PROCESS__EXECHEAP permission if trying to make heap area being executable with mprotect() syscall; - @brk is a current value for brk() syscall which lays inside heap area, it's shown in /proc/$pid/stat. When syscall brk() succesfully provides new memory area to a user space upon brk() completion the mm::brk is updated to carry new value; Both @start_brk and @brk are actively used in /proc/$pid/maps and /proc/$pid/smaps output to find a symbolic name "heap" for VMA being scanned; - @start_stack is printed out in /proc/$pid/stat and used to find a symbolic name "stack" for task and threads in /proc/$pid/maps and /proc/$pid/smaps output, and as the same as with @start_brk -- perf system uses it for event naming. Also kernel treat this member as a start address of where to map vDSO pages and to check if there is enough space for shmat() syscall; - @arg_start, @arg_end, @env_start and @env_end are printed out in /proc/$pid/stat. Another access to the data these members represent is to read /proc/$pid/environ or /proc/$pid/cmdline. Any attempt to read these areas kernel tests with access_process_vm helper so a user must have enough rights for this action; - @auxv and @auxv_size may be read from /proc/$pid/auxv. 
Strictly speaking kernel doesn't care much about which exactly data is sitting there because it is solely for userspace; - @exe_fd is referred from /proc/$pid/exe and when generating coredump. We uses prctl_set_mm_exe_file_locked helper to update this member, so exe-file link modification remains one-shot action. Still note that updating exe-file link now doesn't require sys-resource capability anymore, after all there is no much profit in preventing setup own file link (there are a number of ways to execute own code -- ptrace, ld-preload, so that the only reliable way to find which exactly code is executed is to inspect running program memory). Still we require the caller to be at least user-namespace root user. I believe the old interface should be deprecated and ripped off in a couple of kernel releases if no one against. To test if new interface is implemented in the kernel one can pass PR_SET_MM_MAP_SIZE opcode and the kernel returns the size of currently supported struct prctl_mm_map. [akpm@linux-foundation.org: fix 80-col wordwrap in macro definitions] Signed-off-by: Cyrill Gorcunov Cc: Kees Cook Cc: Tejun Heo Acked-by: Andrew Vagin Tested-by: Andrew Vagin Cc: Eric W. Biederman Cc: H. Peter Anvin Acked-by: Serge Hallyn Cc: Pavel Emelyanov Cc: Vasiliy Kulikov Cc: KAMEZAWA Hiroyuki Cc: Michael Kerrisk Cc: Julien Tinnes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/uapi/linux/prctl.h | 27 +++++++ kernel/sys.c | 190 ++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 216 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 58afc04c107e..513df75d0fc9 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -1,6 +1,8 @@ #ifndef _LINUX_PRCTL_H #define _LINUX_PRCTL_H +#include + /* Values to pass as first argument to prctl() */ #define PR_SET_PDEATHSIG 1 /* Second arg is a signal */ @@ -119,6 +121,31 @@ # define PR_SET_MM_ENV_END 11 # define PR_SET_MM_AUXV 12 # define PR_SET_MM_EXE_FILE 13 +# define PR_SET_MM_MAP 14 +# define PR_SET_MM_MAP_SIZE 15 + +/* + * This structure provides new memory descriptor + * map which mostly modifies /proc/pid/stat[m] + * output for a task. This mostly done in a + * sake of checkpoint/restore functionality. + */ +struct prctl_mm_map { + __u64 start_code; /* code section bounds */ + __u64 end_code; + __u64 start_data; /* data section bounds */ + __u64 end_data; + __u64 start_brk; /* heap for brk() syscall */ + __u64 brk; + __u64 start_stack; /* stack starts at */ + __u64 arg_start; /* command line arguments bounds */ + __u64 arg_end; + __u64 env_start; /* environment variables bounds */ + __u64 env_end; + __u64 *auxv; /* auxiliary vector */ + __u32 auxv_size; /* vector size */ + __u32 exe_fd; /* /proc/$pid/exe link file */ +}; /* * Set specific pid that is allowed to ptrace the current task. diff --git a/kernel/sys.c b/kernel/sys.c index 14222a1699c0..f7030b060018 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1687,6 +1687,187 @@ exit: return err; } +#ifdef CONFIG_CHECKPOINT_RESTORE +/* + * WARNING: we don't require any capability here so be very careful + * in what is allowed for modification from userspace. 
+ */ +static int validate_prctl_map(struct prctl_mm_map *prctl_map) +{ + unsigned long mmap_max_addr = TASK_SIZE; + struct mm_struct *mm = current->mm; + int error = -EINVAL, i; + + static const unsigned char offsets[] = { + offsetof(struct prctl_mm_map, start_code), + offsetof(struct prctl_mm_map, end_code), + offsetof(struct prctl_mm_map, start_data), + offsetof(struct prctl_mm_map, end_data), + offsetof(struct prctl_mm_map, start_brk), + offsetof(struct prctl_mm_map, brk), + offsetof(struct prctl_mm_map, start_stack), + offsetof(struct prctl_mm_map, arg_start), + offsetof(struct prctl_mm_map, arg_end), + offsetof(struct prctl_mm_map, env_start), + offsetof(struct prctl_mm_map, env_end), + }; + + /* + * Make sure the members are not somewhere outside + * of allowed address space. + */ + for (i = 0; i < ARRAY_SIZE(offsets); i++) { + u64 val = *(u64 *)((char *)prctl_map + offsets[i]); + + if ((unsigned long)val >= mmap_max_addr || + (unsigned long)val < mmap_min_addr) + goto out; + } + + /* + * Make sure the pairs are ordered. + */ +#define __prctl_check_order(__m1, __op, __m2) \ + ((unsigned long)prctl_map->__m1 __op \ + (unsigned long)prctl_map->__m2) ? 0 : -EINVAL + error = __prctl_check_order(start_code, <, end_code); + error |= __prctl_check_order(start_data, <, end_data); + error |= __prctl_check_order(start_brk, <=, brk); + error |= __prctl_check_order(arg_start, <=, arg_end); + error |= __prctl_check_order(env_start, <=, env_end); + if (error) + goto out; +#undef __prctl_check_order + + error = -EINVAL; + + /* + * @brk should be after @end_data in traditional maps. + */ + if (prctl_map->start_brk <= prctl_map->end_data || + prctl_map->brk <= prctl_map->end_data) + goto out; + + /* + * Neither we should allow to override limits if they set. + */ + if (check_data_rlimit(rlimit(RLIMIT_DATA), prctl_map->brk, + prctl_map->start_brk, prctl_map->end_data, + prctl_map->start_data)) + goto out; + + /* + * Someone is trying to cheat the auxv vector. + */ + if (prctl_map->auxv_size) { + if (!prctl_map->auxv || prctl_map->auxv_size > sizeof(mm->saved_auxv)) + goto out; + } + + /* + * Finally, make sure the caller has the rights to + * change /proc/pid/exe link: only local root should + * be allowed to. 
+ */ + if (prctl_map->exe_fd != (u32)-1) { + struct user_namespace *ns = current_user_ns(); + const struct cred *cred = current_cred(); + + if (!uid_eq(cred->uid, make_kuid(ns, 0)) || + !gid_eq(cred->gid, make_kgid(ns, 0))) + goto out; + } + + error = 0; +out: + return error; +} + +static int prctl_set_mm_map(int opt, const void __user *addr, unsigned long data_size) +{ + struct prctl_mm_map prctl_map = { .exe_fd = (u32)-1, }; + unsigned long user_auxv[AT_VECTOR_SIZE]; + struct mm_struct *mm = current->mm; + int error; + + BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv)); + BUILD_BUG_ON(sizeof(struct prctl_mm_map) > 256); + + if (opt == PR_SET_MM_MAP_SIZE) + return put_user((unsigned int)sizeof(prctl_map), + (unsigned int __user *)addr); + + if (data_size != sizeof(prctl_map)) + return -EINVAL; + + if (copy_from_user(&prctl_map, addr, sizeof(prctl_map))) + return -EFAULT; + + error = validate_prctl_map(&prctl_map); + if (error) + return error; + + if (prctl_map.auxv_size) { + memset(user_auxv, 0, sizeof(user_auxv)); + if (copy_from_user(user_auxv, + (const void __user *)prctl_map.auxv, + prctl_map.auxv_size)) + return -EFAULT; + + /* Last entry must be AT_NULL as specification requires */ + user_auxv[AT_VECTOR_SIZE - 2] = AT_NULL; + user_auxv[AT_VECTOR_SIZE - 1] = AT_NULL; + } + + down_write(&mm->mmap_sem); + if (prctl_map.exe_fd != (u32)-1) + error = prctl_set_mm_exe_file_locked(mm, prctl_map.exe_fd); + downgrade_write(&mm->mmap_sem); + if (error) + goto out; + + /* + * We don't validate if these members are pointing to + * real present VMAs because application may have correspond + * VMAs already unmapped and kernel uses these members for statistics + * output in procfs mostly, except + * + * - @start_brk/@brk which are used in do_brk but kernel lookups + * for VMAs when updating these memvers so anything wrong written + * here cause kernel to swear at userspace program but won't lead + * to any problem in kernel itself + */ + + mm->start_code = prctl_map.start_code; + mm->end_code = prctl_map.end_code; + mm->start_data = prctl_map.start_data; + mm->end_data = prctl_map.end_data; + mm->start_brk = prctl_map.start_brk; + mm->brk = prctl_map.brk; + mm->start_stack = prctl_map.start_stack; + mm->arg_start = prctl_map.arg_start; + mm->arg_end = prctl_map.arg_end; + mm->env_start = prctl_map.env_start; + mm->env_end = prctl_map.env_end; + + /* + * Note this update of @saved_auxv is lockless thus + * if someone reads this member in procfs while we're + * updating -- it may get partly updated results. It's + * known and acceptable trade off: we leave it as is to + * not introduce additional locks here making the kernel + * more complex. 
+ */ + if (prctl_map.auxv_size) + memcpy(mm->saved_auxv, user_auxv, sizeof(user_auxv)); + + error = 0; +out: + up_read(&mm->mmap_sem); + return error; +} +#endif /* CONFIG_CHECKPOINT_RESTORE */ + static int prctl_set_mm(int opt, unsigned long addr, unsigned long arg4, unsigned long arg5) { @@ -1694,9 +1875,16 @@ static int prctl_set_mm(int opt, unsigned long addr, struct vm_area_struct *vma; int error; - if (arg5 || (arg4 && opt != PR_SET_MM_AUXV)) + if (arg5 || (arg4 && (opt != PR_SET_MM_AUXV && + opt != PR_SET_MM_MAP && + opt != PR_SET_MM_MAP_SIZE))) return -EINVAL; +#ifdef CONFIG_CHECKPOINT_RESTORE + if (opt == PR_SET_MM_MAP || opt == PR_SET_MM_MAP_SIZE) + return prctl_set_mm_map(opt, (const void __user *)addr, arg4); +#endif + if (!capable(CAP_SYS_RESOURCE)) return -EPERM; -- cgit v1.2.3 From 1f13ae399c58af5a05b5cee61da864e1f4071de4 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:27:39 -0700 Subject: mm: remove noisy remainder of the scan_unevictable interface The deprecation warnings for the scan_unevictable interface triggers by scripts doing `sysctl -a | grep something else'. This is annoying and not helpful. The interface has been defunct since 264e56d8247e ("mm: disable user interface to manually rescue unevictable pages"), which was in 2011, and there haven't been any reports of usecases for it, only reports that the deprecation warnings are annying. It's unlikely that anybody is using this interface specifically at this point, so remove it. Signed-off-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/ABI/stable/sysfs-devices-node | 8 ---- drivers/base/node.c | 3 -- include/linux/swap.h | 16 -------- kernel/sysctl.c | 7 ---- mm/vmscan.c | 63 ----------------------------- 5 files changed, 97 deletions(-) (limited to 'include') diff --git a/Documentation/ABI/stable/sysfs-devices-node b/Documentation/ABI/stable/sysfs-devices-node index ce259c13c36a..5b2d0f08867c 100644 --- a/Documentation/ABI/stable/sysfs-devices-node +++ b/Documentation/ABI/stable/sysfs-devices-node @@ -85,14 +85,6 @@ Description: will be compacted. When it completes, memory will be freed into blocks which have as many contiguous pages as possible -What: /sys/devices/system/node/nodeX/scan_unevictable_pages -Date: October 2008 -Contact: Lee Schermerhorn -Description: - When set, it triggers scanning the node's unevictable lists - and move any pages that have become evictable onto the respective - zone's inactive list. 
See mm/vmscan.c - What: /sys/devices/system/node/nodeX/hugepages/hugepages-/ Date: December 2009 Contact: Lee Schermerhorn diff --git a/drivers/base/node.c b/drivers/base/node.c index d51c49c9bafa..472168cd0c97 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -289,8 +289,6 @@ static int register_node(struct node *node, int num, struct node *parent) device_create_file(&node->dev, &dev_attr_distance); device_create_file(&node->dev, &dev_attr_vmstat); - scan_unevictable_register_node(node); - hugetlb_register_node(node); compaction_register_node(node); @@ -314,7 +312,6 @@ void unregister_node(struct node *node) device_remove_file(&node->dev, &dev_attr_distance); device_remove_file(&node->dev, &dev_attr_vmstat); - scan_unevictable_unregister_node(node); hugetlb_unregister_node(node); /* no-op, if memoryless node */ device_unregister(&node->dev); diff --git a/include/linux/swap.h b/include/linux/swap.h index 1b72060f093a..ea4f926e6b9b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -354,22 +354,6 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order) extern int page_evictable(struct page *page); extern void check_move_unevictable_pages(struct page **, int nr_pages); -extern unsigned long scan_unevictable_pages; -extern int scan_unevictable_handler(struct ctl_table *, int, - void __user *, size_t *, loff_t *); -#ifdef CONFIG_NUMA -extern int scan_unevictable_register_node(struct node *node); -extern void scan_unevictable_unregister_node(struct node *node); -#else -static inline int scan_unevictable_register_node(struct node *node) -{ - return 0; -} -static inline void scan_unevictable_unregister_node(struct node *node) -{ -} -#endif - extern int kswapd_run(int nid); extern void kswapd_stop(int nid); #ifdef CONFIG_MEMCG diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 75875a741b5e..91180987e40e 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -1460,13 +1460,6 @@ static struct ctl_table vm_table[] = { .extra2 = &one, }, #endif - { - .procname = "scan_unevictable_pages", - .data = &scan_unevictable_pages, - .maxlen = sizeof(scan_unevictable_pages), - .mode = 0644, - .proc_handler = scan_unevictable_handler, - }, #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", diff --git a/mm/vmscan.c b/mm/vmscan.c index 1a71b8b1ea34..af72fe8e8d74 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -3797,66 +3797,3 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages) } } #endif /* CONFIG_SHMEM */ - -static void warn_scan_unevictable_pages(void) -{ - printk_once(KERN_WARNING - "%s: The scan_unevictable_pages sysctl/node-interface has been " - "disabled for lack of a legitimate use case. If you have " - "one, please send an email to linux-mm@kvack.org.\n", - current->comm); -} - -/* - * scan_unevictable_pages [vm] sysctl handler. On demand re-scan of - * all nodes' unevictable lists for evictable pages - */ -unsigned long scan_unevictable_pages; - -int scan_unevictable_handler(struct ctl_table *table, int write, - void __user *buffer, - size_t *length, loff_t *ppos) -{ - warn_scan_unevictable_pages(); - proc_doulongvec_minmax(table, write, buffer, length, ppos); - scan_unevictable_pages = 0; - return 0; -} - -#ifdef CONFIG_NUMA -/* - * per node 'scan_unevictable_pages' attribute. On demand re-scan of - * a specified node's per zone unevictable lists for evictable pages. 
- */ - -static ssize_t read_scan_unevictable_node(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - warn_scan_unevictable_pages(); - return sprintf(buf, "0\n"); /* always zero; should fit... */ -} - -static ssize_t write_scan_unevictable_node(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) -{ - warn_scan_unevictable_pages(); - return 1; -} - - -static DEVICE_ATTR(scan_unevictable_pages, S_IRUGO | S_IWUSR, - read_scan_unevictable_node, - write_scan_unevictable_node); - -int scan_unevictable_register_node(struct node *node) -{ - return device_create_file(&node->dev, &dev_attr_scan_unevictable_pages); -} - -void scan_unevictable_unregister_node(struct node *node) -{ - device_remove_file(&node->dev, &dev_attr_scan_unevictable_pages); -} -#endif -- cgit v1.2.3 From 6b6482bbf64ef6f6dbc8b52f7a7cf88a0498bd51 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:48 -0700 Subject: mempolicy: remove the "task" arg of vma_policy_mof() and simplify it 1. vma_policy_mof(task) is simply not safe unless task == current, it can race with do_exit()->mpol_put(). Remove this arg and update its single caller. 2. vma can not be NULL, remove this check and simplify the code. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 +- kernel/sched/fair.c | 2 +- mm/mempolicy.c | 25 +++++++++++-------------- 3 files changed, 13 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index f230a978e6ba..5e4bfcedd2ce 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -136,7 +136,7 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_vma_policy(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long addr); -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma); +bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); extern void numa_policy_init(void); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index bfa3c86d0d68..82088b29704e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1946,7 +1946,7 @@ void task_numa_work(struct callback_head *work) vma = mm->mmap; } for (; vma; vma = vma->vm_next) { - if (!vma_migratable(vma) || !vma_policy_mof(p, vma)) + if (!vma_migratable(vma) || !vma_policy_mof(vma)) continue; /* diff --git a/mm/mempolicy.c b/mm/mempolicy.c index b86b08e77b8d..ad27bbc757bf 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1646,27 +1646,24 @@ struct mempolicy *get_vma_policy(struct task_struct *task, return pol; } -bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma) +bool vma_policy_mof(struct vm_area_struct *vma) { - struct mempolicy *pol = NULL; - - if (vma) { - if (vma->vm_ops && vma->vm_ops->get_policy) { - bool ret = false; + struct mempolicy *pol; - pol = vma->vm_ops->get_policy(vma, vma->vm_start); - if (pol && (pol->flags & MPOL_F_MOF)) - ret = true; - mpol_cond_put(pol); + if (vma->vm_ops && vma->vm_ops->get_policy) { + bool ret = false; - return ret; - } + pol = vma->vm_ops->get_policy(vma, vma->vm_start); + if (pol && (pol->flags & MPOL_F_MOF)) + ret = true; + mpol_cond_put(pol); - pol = 
vma->vm_policy; + return ret; } + pol = vma->vm_policy; if (!pol) - pol = get_task_policy(task); + pol = get_task_policy(current); return pol->flags & MPOL_F_MOF; } -- cgit v1.2.3 From 74d2c3a05cc6c1eef2d7236a9919036ed85ddaaf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:50 -0700 Subject: mempolicy: introduce __get_vma_policy(), export get_task_policy() Extract the code which looks for vma's policy from get_vma_policy() into the new helper, __get_vma_policy(). Export get_task_policy(). Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 3 +++ mm/mempolicy.c | 44 ++++++++++++++++++++++++++------------------ 2 files changed, 29 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index 5e4bfcedd2ce..e1abe249892a 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -134,6 +134,9 @@ void mpol_free_shared_policy(struct shared_policy *p); struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx); +struct mempolicy *get_task_policy(struct task_struct *p); +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr); struct mempolicy *get_vma_policy(struct task_struct *tsk, struct vm_area_struct *vma, unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index ad27bbc757bf..4378c334e89b 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -123,7 +123,7 @@ static struct mempolicy default_policy = { static struct mempolicy preferred_node_policy[MAX_NUMNODES]; -static struct mempolicy *get_task_policy(struct task_struct *p) +struct mempolicy *get_task_policy(struct task_struct *p) { struct mempolicy *pol = p->mempolicy; int node; @@ -1603,23 +1603,8 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len, #endif -/* - * get_vma_policy(@task, @vma, @addr) - * @task: task for fallback if vma policy == default - * @vma: virtual memory area whose policy is sought - * @addr: address in @vma for shared policy lookup - * - * Returns effective policy for a VMA at specified address. - * Falls back to @task or system default policy, as necessary. - * Current or other task's task mempolicy and non-shared vma policies must be - * protected by task_lock(task) by the caller. - * Shared policies [those marked as MPOL_F_SHARED] require an extra reference - * count--added by the get_policy() vm_op, as appropriate--to protect against - * freeing by another task. It is the caller's responsibility to free the - * extra reference for shared policies. - */ -struct mempolicy *get_vma_policy(struct task_struct *task, - struct vm_area_struct *vma, unsigned long addr) +struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol = NULL; @@ -1640,6 +1625,29 @@ struct mempolicy *get_vma_policy(struct task_struct *task, } } + return pol; +} + +/* + * get_vma_policy(@task, @vma, @addr) + * @task: task for fallback if vma policy == default + * @vma: virtual memory area whose policy is sought + * @addr: address in @vma for shared policy lookup + * + * Returns effective policy for a VMA at specified address. 
+ * Falls back to @task or system default policy, as necessary. + * Current or other task's task mempolicy and non-shared vma policies must be + * protected by task_lock(task) by the caller. + * Shared policies [those marked as MPOL_F_SHARED] require an extra reference + * count--added by the get_policy() vm_op, as appropriate--to protect against + * freeing by another task. It is the caller's responsibility to free the + * extra reference for shared policies. + */ +struct mempolicy *get_vma_policy(struct task_struct *task, + struct vm_area_struct *vma, unsigned long addr) +{ + struct mempolicy *pol = __get_vma_policy(vma, addr); + if (!pol) pol = get_task_policy(task); -- cgit v1.2.3 From dd6eecb917938c1b7e505a83df307b3476e7c8bd Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 9 Oct 2014 15:27:57 -0700 Subject: mempolicy: unexport get_vma_policy() and remove its "task" arg - get_vma_policy(task) is not safe if task != current, remove this argument. - get_vma_policy() no longer has callers outside of mempolicy.c, make it static. Signed-off-by: Oleg Nesterov Cc: KAMEZAWA Hiroyuki Cc: David Rientjes Cc: KOSAKI Motohiro Cc: Alexander Viro Cc: Cyrill Gorcunov Cc: "Eric W. Biederman" Cc: "Kirill A. Shutemov" Cc: Peter Zijlstra Cc: Hugh Dickins Cc: Andi Kleen Cc: Naoya Horiguchi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mempolicy.h | 2 -- mm/mempolicy.c | 19 ++++++++----------- 2 files changed, 8 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h index e1abe249892a..3d385c81c153 100644 --- a/include/linux/mempolicy.h +++ b/include/linux/mempolicy.h @@ -137,8 +137,6 @@ struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp, struct mempolicy *get_task_policy(struct task_struct *p); struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, unsigned long addr); -struct mempolicy *get_vma_policy(struct task_struct *tsk, - struct vm_area_struct *vma, unsigned long addr); bool vma_policy_mof(struct vm_area_struct *vma); extern void numa_default_policy(void); diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 9695a9a3ab90..008fb32936eb 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -1616,27 +1616,24 @@ struct mempolicy *__get_vma_policy(struct vm_area_struct *vma, } /* - * get_vma_policy(@task, @vma, @addr) - * @task: task for fallback if vma policy == default + * get_vma_policy(@vma, @addr) * @vma: virtual memory area whose policy is sought * @addr: address in @vma for shared policy lookup * * Returns effective policy for a VMA at specified address. - * Falls back to @task or system default policy, as necessary. - * Current or other task's task mempolicy and non-shared vma policies must be - * protected by task_lock(task) by the caller. + * Falls back to current->mempolicy or system default policy, as necessary. * Shared policies [those marked as MPOL_F_SHARED] require an extra reference * count--added by the get_policy() vm_op, as appropriate--to protect against * freeing by another task. It is the caller's responsibility to free the * extra reference for shared policies. 
*/ -struct mempolicy *get_vma_policy(struct task_struct *task, - struct vm_area_struct *vma, unsigned long addr) +static struct mempolicy *get_vma_policy(struct vm_area_struct *vma, + unsigned long addr) { struct mempolicy *pol = __get_vma_policy(vma, addr); if (!pol) - pol = get_task_policy(task); + pol = get_task_policy(current); return pol; } @@ -1864,7 +1861,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, { struct zonelist *zl; - *mpol = get_vma_policy(current, vma, addr); + *mpol = get_vma_policy(vma, addr); *nodemask = NULL; /* assume !MPOL_BIND */ if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) { @@ -2019,7 +2016,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, unsigned int cpuset_mems_cookie; retry_cpuset: - pol = get_vma_policy(current, vma, addr); + pol = get_vma_policy(vma, addr); cpuset_mems_cookie = read_mems_allowed_begin(); if (unlikely(pol->mode == MPOL_INTERLEAVE)) { @@ -2285,7 +2282,7 @@ int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long BUG_ON(!vma); - pol = get_vma_policy(current, vma, addr); + pol = get_vma_policy(vma, addr); if (!(pol->flags & MPOL_F_MOF)) goto out; -- cgit v1.2.3 From 1c93923cc264105418e6ead149c76bd88302eff4 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 9 Oct 2014 15:27:59 -0700 Subject: include/linux/migrate.h: remove migrate_page #define This is designed to avoid a few ifdefs in .c files but it's obnoxious because it can cause unsuspecting "migrate_page" symbols to get turned into "NULL". Just nuke it and use the ifdefs. Cc: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/migrate.h | 3 --- mm/shmem.c | 2 ++ mm/swap_state.c | 2 ++ 3 files changed, 4 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/include/linux/migrate.h b/include/linux/migrate.h index a2901c414664..b66fd10f4b93 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -82,9 +82,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, return -ENOSYS; } -/* Possible settings for the migrate_page() method in address_operations */ -#define migrate_page NULL - #endif /* CONFIG_MIGRATION */ #ifdef CONFIG_NUMA_BALANCING diff --git a/mm/shmem.c b/mm/shmem.c index 469f90d56051..4fad61bb41e5 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -3077,7 +3077,9 @@ static const struct address_space_operations shmem_aops = { .write_begin = shmem_write_begin, .write_end = shmem_write_end, #endif +#ifdef CONFIG_MIGRATION .migratepage = migrate_page, +#endif .error_remove_page = generic_error_remove_page, }; diff --git a/mm/swap_state.c b/mm/swap_state.c index 3e0ec83d000c..ef1f39139b71 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -28,7 +28,9 @@ static const struct address_space_operations swap_aops = { .writepage = swap_writepage, .set_page_dirty = swap_set_page_dirty, +#ifdef CONFIG_MIGRATION .migratepage = migrate_page, +#endif }; static struct backing_dev_info swap_backing_dev_info = { -- cgit v1.2.3 From 0bf55139782db1fa96af66e37cc84afde18443ef Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:06 -0700 Subject: mm: introduce dump_vma Introduce a helper to dump information about a VMA, this also makes dump_page_flags more generic and re-uses that so the output looks very similar to dump_page: [ 61.903437] vma ffff88070f88be00 start 00007fff25970000 end 00007fff25992000 [ 61.903437] next ffff88070facd600 prev 
ffff88070face400 mm ffff88070fade000 [ 61.903437] prot 8000000000000025 anon_vma ffff88070fa1e200 vm_ops (null) [ 61.903437] pgoff 7ffffffdd file (null) private_data (null) [ 61.909129] flags: 0x100173(read|write|mayread|maywrite|mayexec|growsdown|account) [akpm@linux-foundation.org: make dump_vma() require CONFIG_DEBUG_VM] [swarren@nvidia.com: fix dump_vma() compilation] Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Stephen Warren Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 2 ++ mm/page_alloc.c | 82 +++++++++++++++++++++++++++++++++++++++++++------ 2 files changed, 75 insertions(+), 9 deletions(-) (limited to 'include') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 2f348d02f640..dfb93333fc62 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -4,10 +4,12 @@ #include struct page; +struct vm_area_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); +void dump_vma(const struct vm_area_struct *vma); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f07588b11d59..3a950144f80b 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6626,27 +6626,26 @@ static const struct trace_print_flags pageflag_names[] = { #endif }; -static void dump_page_flags(unsigned long flags) +static void dump_flags(unsigned long flags, + const struct trace_print_flags *names, int count) { const char *delim = ""; unsigned long mask; int i; - BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); - - printk(KERN_ALERT "page flags: %#lx(", flags); + printk(KERN_ALERT "flags: %#lx(", flags); /* remove zone id */ flags &= (1UL << NR_PAGEFLAGS) - 1; - for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) { + for (i = 0; i < count && flags; i++) { - mask = pageflag_names[i].mask; + mask = names[i].mask; if ((flags & mask) != mask) continue; flags &= ~mask; - printk("%s%s", delim, pageflag_names[i].name); + printk("%s%s", delim, names[i].name); delim = "|"; } @@ -6664,12 +6663,14 @@ void dump_page_badflags(struct page *page, const char *reason, "page:%p count:%d mapcount:%d mapping:%p index:%#lx\n", page, atomic_read(&page->_count), page_mapcount(page), page->mapping, page->index); - dump_page_flags(page->flags); + BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS); + dump_flags(page->flags, pageflag_names, ARRAY_SIZE(pageflag_names)); if (reason) pr_alert("page dumped because: %s\n", reason); if (page->flags & badflags) { pr_alert("bad because of flags:\n"); - dump_page_flags(page->flags & badflags); + dump_flags(page->flags & badflags, + pageflag_names, ARRAY_SIZE(pageflag_names)); } mem_cgroup_print_bad_page(page); } @@ -6679,3 +6680,66 @@ void dump_page(struct page *page, const char *reason) dump_page_badflags(page, reason, 0); } EXPORT_SYMBOL(dump_page); + +#ifdef CONFIG_DEBUG_VM + +static const struct trace_print_flags vmaflags_names[] = { + {VM_READ, "read" }, + {VM_WRITE, "write" }, + {VM_EXEC, "exec" }, + {VM_SHARED, "shared" }, + {VM_MAYREAD, "mayread" }, + {VM_MAYWRITE, "maywrite" }, + {VM_MAYEXEC, "mayexec" }, + {VM_MAYSHARE, "mayshare" }, + {VM_GROWSDOWN, "growsdown" }, + {VM_PFNMAP, "pfnmap" }, + {VM_DENYWRITE, "denywrite" }, + {VM_LOCKED, 
"locked" }, + {VM_IO, "io" }, + {VM_SEQ_READ, "seqread" }, + {VM_RAND_READ, "randread" }, + {VM_DONTCOPY, "dontcopy" }, + {VM_DONTEXPAND, "dontexpand" }, + {VM_ACCOUNT, "account" }, + {VM_NORESERVE, "noreserve" }, + {VM_HUGETLB, "hugetlb" }, + {VM_NONLINEAR, "nonlinear" }, +#if defined(CONFIG_X86) + {VM_PAT, "pat" }, +#elif defined(CONFIG_PPC) + {VM_SAO, "sao" }, +#elif defined(CONFIG_PARISC) || defined(CONFIG_METAG) || defined(CONFIG_IA64) + {VM_GROWSUP, "growsup" }, +#elif !defined(CONFIG_MMU) + {VM_MAPPED_COPY, "mappedcopy" }, +#else + {VM_ARCH_1, "arch_1" }, +#endif + {VM_DONTDUMP, "dontdump" }, +#ifdef CONFIG_MEM_SOFT_DIRTY + {VM_SOFTDIRTY, "softdirty" }, +#endif + {VM_MIXEDMAP, "mixedmap" }, + {VM_HUGEPAGE, "hugepage" }, + {VM_NOHUGEPAGE, "nohugepage" }, + {VM_MERGEABLE, "mergeable" }, +}; + +void dump_vma(const struct vm_area_struct *vma) +{ + printk(KERN_ALERT + "vma %p start %p end %p\n" + "next %p prev %p mm %p\n" + "prot %lx anon_vma %p vm_ops %p\n" + "pgoff %lx file %p private_data %p\n", + vma, (void *)vma->vm_start, (void *)vma->vm_end, vma->vm_next, + vma->vm_prev, vma->vm_mm, + (unsigned long)pgprot_val(vma->vm_page_prot), + vma->anon_vma, vma->vm_ops, vma->vm_pgoff, + vma->vm_file, vma->vm_private_data); + dump_flags(vma->vm_flags, vmaflags_names, ARRAY_SIZE(vmaflags_names)); +} +EXPORT_SYMBOL(dump_vma); + +#endif /* CONFIG_DEBUG_VM */ -- cgit v1.2.3 From fa3759ccd5651c4235f572302d58c8ec9ddf1c4b Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:08 -0700 Subject: mm: introduce VM_BUG_ON_VMA Very similar to VM_BUG_ON_PAGE but dumps VMA information instead. Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'include') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index dfb93333fc62..569e4c8d0ebb 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -20,12 +20,20 @@ void dump_vma(const struct vm_area_struct *vma); BUG(); \ } \ } while (0) +#define VM_BUG_ON_VMA(cond, vma) \ + do { \ + if (unlikely(cond)) { \ + dump_vma(vma); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) #else #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) +#define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) -- cgit v1.2.3 From 81d1b09c6be66afac7d41ee52279d9bccbce56d8 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:10 -0700 Subject: mm: convert a few VM_BUG_ON callers to VM_BUG_ON_VMA Trivially convert a few VM_BUG_ON calls to VM_BUG_ON_VMA to extract more information when they trigger. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Sasha Levin Reviewed-by: Naoya Horiguchi Cc: Kirill A. 
Shutemov Cc: Konstantin Khlebnikov Cc: Rik van Riel Cc: Mel Gorman Cc: Michal Hocko Cc: Hugh Dickins Cc: Vlastimil Babka Cc: Michel Lespinasse Cc: Minchan Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/huge_mm.h | 2 +- include/linux/rmap.h | 2 +- mm/huge_memory.c | 6 +++--- mm/hugetlb.c | 14 +++++++------- mm/interval_tree.c | 2 +- mm/mlock.c | 4 ++-- mm/mmap.c | 6 +++--- mm/mremap.c | 3 ++- mm/rmap.c | 8 ++++---- 9 files changed, 24 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 63579cb8d3dc..ad9051bab267 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -132,7 +132,7 @@ extern int __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, static inline int pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma, spinlock_t **ptl) { - VM_BUG_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)); + VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma); if (pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma, ptl); else diff --git a/include/linux/rmap.h b/include/linux/rmap.h index be574506e6a9..c0c2bce6b0b7 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -150,7 +150,7 @@ int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) { - VM_BUG_ON(vma->anon_vma != next->anon_vma); + VM_BUG_ON_VMA(vma->anon_vma != next->anon_vma, vma); unlink_anon_vmas(next); } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 55ab569c31b4..c13148cc745f 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1096,7 +1096,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, unsigned long mmun_end; /* For mmu_notifiers */ ptl = pmd_lockptr(mm, pmd); - VM_BUG_ON(!vma->anon_vma); + VM_BUG_ON_VMA(!vma->anon_vma, vma); haddr = address & HPAGE_PMD_MASK; if (is_huge_zero_pmd(orig_pmd)) goto alloc; @@ -2083,7 +2083,7 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma) if (vma->vm_ops) /* khugepaged not yet working on file or special mappings */ return 0; - VM_BUG_ON(vma->vm_flags & VM_NO_THP); + VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (hstart < hend) @@ -2406,7 +2406,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma) return false; if (is_vma_temporary_stack(vma)) return false; - VM_BUG_ON(vma->vm_flags & VM_NO_THP); + VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); return true; } diff --git a/mm/hugetlb.c b/mm/hugetlb.c index eeceeeb09019..9fd722769927 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -434,7 +434,7 @@ static inline struct resv_map *inode_resv_map(struct inode *inode) static struct resv_map *vma_resv_map(struct vm_area_struct *vma) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (vma->vm_flags & VM_MAYSHARE) { struct address_space *mapping = vma->vm_file->f_mapping; struct inode *inode = mapping->host; @@ -449,8 +449,8 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma) static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); + VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, (get_vma_private_data(vma) & HPAGE_RESV_MASK) | (unsigned long)map); @@ -458,15 +458,15 
@@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map) static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); - VM_BUG_ON(vma->vm_flags & VM_MAYSHARE); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); + VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma); set_vma_private_data(vma, get_vma_private_data(vma) | flags); } static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); return (get_vma_private_data(vma) & flag) != 0; } @@ -474,7 +474,7 @@ static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag) /* Reset counters to 0 and clear all HPAGE_RESV_* flags */ void reset_vma_resv_huge_pages(struct vm_area_struct *vma) { - VM_BUG_ON(!is_vm_hugetlb_page(vma)); + VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma); if (!(vma->vm_flags & VM_MAYSHARE)) vma->vm_private_data = (void *)0; } diff --git a/mm/interval_tree.c b/mm/interval_tree.c index 4a5822a586e6..8da581fa9060 100644 --- a/mm/interval_tree.c +++ b/mm/interval_tree.c @@ -34,7 +34,7 @@ void vma_interval_tree_insert_after(struct vm_area_struct *node, struct vm_area_struct *parent; unsigned long last = vma_last_pgoff(node); - VM_BUG_ON(vma_start_pgoff(node) != vma_start_pgoff(prev)); + VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node); if (!prev->shared.linear.rb.rb_right) { parent = prev; diff --git a/mm/mlock.c b/mm/mlock.c index ce84cb0b83ef..d5d09d0786ec 100644 --- a/mm/mlock.c +++ b/mm/mlock.c @@ -233,8 +233,8 @@ long __mlock_vma_pages_range(struct vm_area_struct *vma, VM_BUG_ON(start & ~PAGE_MASK); VM_BUG_ON(end & ~PAGE_MASK); - VM_BUG_ON(start < vma->vm_start); - VM_BUG_ON(end > vma->vm_end); + VM_BUG_ON_VMA(start < vma->vm_start, vma); + VM_BUG_ON_VMA(end > vma->vm_end, vma); VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem)); gup_flags = FOLL_TOUCH | FOLL_MLOCK; diff --git a/mm/mmap.c b/mm/mmap.c index 7ff38f1a66ec..69d4c5199fd8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -786,8 +786,8 @@ again: remove_next = 1 + (end > next->vm_end); if (!anon_vma && adjust_next) anon_vma = next->anon_vma; if (anon_vma) { - VM_BUG_ON(adjust_next && next->anon_vma && - anon_vma != next->anon_vma); + VM_BUG_ON_VMA(adjust_next && next->anon_vma && + anon_vma != next->anon_vma, next); anon_vma_lock_write(anon_vma); anon_vma_interval_tree_pre_update_vma(vma); if (adjust_next) @@ -2848,7 +2848,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, * safe. It is only safe to keep the vm_pgoff * linear if there are no pages mapped yet. 
*/ - VM_BUG_ON(faulted_in_anon_vma); + VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma); *vmap = vma = new_vma; } *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff); diff --git a/mm/mremap.c b/mm/mremap.c index 05f1180e9f21..89e45d8a983a 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -195,7 +195,8 @@ unsigned long move_page_tables(struct vm_area_struct *vma, if (pmd_trans_huge(*old_pmd)) { int err = 0; if (extent == HPAGE_PMD_SIZE) { - VM_BUG_ON(vma->vm_file || !vma->anon_vma); + VM_BUG_ON_VMA(vma->vm_file || !vma->anon_vma, + vma); /* See comment in move_ptes() */ if (need_rmap_locks) anon_vma_lock_write(vma->anon_vma); diff --git a/mm/rmap.c b/mm/rmap.c index bc74e0012809..116a5053415b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -527,7 +527,7 @@ vma_address(struct page *page, struct vm_area_struct *vma) unsigned long address = __vma_address(page, vma); /* page should be within @vma mapping range */ - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); return address; } @@ -897,7 +897,7 @@ void page_move_anon_rmap(struct page *page, struct anon_vma *anon_vma = vma->anon_vma; VM_BUG_ON_PAGE(!PageLocked(page), page); - VM_BUG_ON(!anon_vma); + VM_BUG_ON_VMA(!anon_vma, vma); VM_BUG_ON_PAGE(page->index != linear_page_index(vma, address), page); anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON; @@ -1024,7 +1024,7 @@ void do_page_add_anon_rmap(struct page *page, void page_add_new_anon_rmap(struct page *page, struct vm_area_struct *vma, unsigned long address) { - VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end); + VM_BUG_ON_VMA(address < vma->vm_start || address >= vma->vm_end, vma); SetPageSwapBacked(page); atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */ if (PageTransHuge(page)) @@ -1670,7 +1670,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc) * structure at mapping cannot be freed and reused yet, * so we can safely take mapping->i_mmap_mutex. */ - VM_BUG_ON(!PageLocked(page)); + VM_BUG_ON_PAGE(!PageLocked(page), page); if (!mapping) return ret; -- cgit v1.2.3 From 5705465174686d007473e017b76c4b64b44aa690 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:28:17 -0700 Subject: mm: clean up zone flags Page reclaim tests zone_is_reclaim_dirty(), but the site that actually sets this state does zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY), sending the reader through layers indirection just to track down a simple bit. Remove all zone flag wrappers and just use bitops against zone->flags directly. It's just as readable and the lines are barely any longer. Also rename ZONE_TAIL_LRU_DIRTY to ZONE_DIRTY to match ZONE_WRITEBACK, and remove the zone_flags_t typedef. 
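[editor's note: a minimal before/after sketch of the conversion this patch performs, lifted from the mm/vmscan.c hunk further down; no API beyond what the diff itself introduces is assumed.]

        /* Before: a dedicated per-flag wrapper and the old flag name. */
        if (nr_unqueued_dirty == nr_taken)
                zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);

        /* After: an open-coded bitop on zone->flags, with the flag renamed
         * ZONE_DIRTY to match ZONE_WRITEBACK. */
        if (nr_unqueued_dirty == nr_taken)
                set_bit(ZONE_DIRTY, &zone->flags);

The wrappers were one-line set_bit/clear_bit/test_bit calls anyway, so the open-coded form reads just as well while removing a layer of indirection.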
Signed-off-by: Johannes Weiner Acked-by: David Rientjes Acked-by: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmzone.h | 51 +++----------------------------------------------- mm/backing-dev.c | 2 +- mm/oom_kill.c | 6 +++--- mm/page_alloc.c | 8 ++++---- mm/vmscan.c | 28 +++++++++++++-------------- 5 files changed, 25 insertions(+), 70 deletions(-) (limited to 'include') diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 318df7051850..48bf12ef6620 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -521,13 +521,13 @@ struct zone { atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; } ____cacheline_internodealigned_in_smp; -typedef enum { +enum zone_flags { ZONE_RECLAIM_LOCKED, /* prevents concurrent reclaim */ ZONE_OOM_LOCKED, /* zone is in OOM killer zonelist */ ZONE_CONGESTED, /* zone has many dirty pages backed by * a congested BDI */ - ZONE_TAIL_LRU_DIRTY, /* reclaim scanning has recently found + ZONE_DIRTY, /* reclaim scanning has recently found * many dirty file pages at the tail * of the LRU. */ @@ -535,52 +535,7 @@ typedef enum { * many pages under writeback */ ZONE_FAIR_DEPLETED, /* fair zone policy batch depleted */ -} zone_flags_t; - -static inline void zone_set_flag(struct zone *zone, zone_flags_t flag) -{ - set_bit(flag, &zone->flags); -} - -static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag) -{ - return test_and_set_bit(flag, &zone->flags); -} - -static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag) -{ - clear_bit(flag, &zone->flags); -} - -static inline int zone_is_reclaim_congested(const struct zone *zone) -{ - return test_bit(ZONE_CONGESTED, &zone->flags); -} - -static inline int zone_is_reclaim_dirty(const struct zone *zone) -{ - return test_bit(ZONE_TAIL_LRU_DIRTY, &zone->flags); -} - -static inline int zone_is_reclaim_writeback(const struct zone *zone) -{ - return test_bit(ZONE_WRITEBACK, &zone->flags); -} - -static inline int zone_is_reclaim_locked(const struct zone *zone) -{ - return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags); -} - -static inline int zone_is_fair_depleted(const struct zone *zone) -{ - return test_bit(ZONE_FAIR_DEPLETED, &zone->flags); -} - -static inline int zone_is_oom_locked(const struct zone *zone) -{ - return test_bit(ZONE_OOM_LOCKED, &zone->flags); -} +}; static inline unsigned long zone_end_pfn(const struct zone *zone) { diff --git a/mm/backing-dev.c b/mm/backing-dev.c index 1706cbbdf5f0..b27714f1b40f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c @@ -631,7 +631,7 @@ long wait_iff_congested(struct zone *zone, int sync, long timeout) * of sleeping on the congestion queue */ if (atomic_read(&nr_bdi_congested[sync]) == 0 || - !zone_is_reclaim_congested(zone)) { + !test_bit(ZONE_CONGESTED, &zone->flags)) { cond_resched(); /* In case we scheduled, work out time remaining */ diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 1e11df8fa7ec..bbf405a3a18f 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -565,7 +565,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask) spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - if (zone_is_oom_locked(zone)) { + if (test_bit(ZONE_OOM_LOCKED, &zone->flags)) { ret = false; goto out; } @@ -575,7 +575,7 @@ bool oom_zonelist_trylock(struct zonelist *zonelist, gfp_t gfp_mask) * call to oom_zonelist_trylock() doesn't succeed when it shouldn't. 
*/ for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - zone_set_flag(zone, ZONE_OOM_LOCKED); + set_bit(ZONE_OOM_LOCKED, &zone->flags); out: spin_unlock(&zone_scan_lock); @@ -594,7 +594,7 @@ void oom_zonelist_unlock(struct zonelist *zonelist, gfp_t gfp_mask) spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) - zone_clear_flag(zone, ZONE_OOM_LOCKED); + clear_bit(ZONE_OOM_LOCKED, &zone->flags); spin_unlock(&zone_scan_lock); } diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ae2f8474273c..f3769f0fce3c 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1614,8 +1614,8 @@ again: __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); if (atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH]) <= 0 && - !zone_is_fair_depleted(zone)) - zone_set_flag(zone, ZONE_FAIR_DEPLETED); + !test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) + set_bit(ZONE_FAIR_DEPLETED, &zone->flags); __count_zone_vm_events(PGALLOC, zone, 1 << order); zone_statistics(preferred_zone, zone, gfp_flags); @@ -1935,7 +1935,7 @@ static void reset_alloc_batches(struct zone *preferred_zone) mod_zone_page_state(zone, NR_ALLOC_BATCH, high_wmark_pages(zone) - low_wmark_pages(zone) - atomic_long_read(&zone->vm_stat[NR_ALLOC_BATCH])); - zone_clear_flag(zone, ZONE_FAIR_DEPLETED); + clear_bit(ZONE_FAIR_DEPLETED, &zone->flags); } while (zone++ != preferred_zone); } @@ -1986,7 +1986,7 @@ zonelist_scan: if (alloc_flags & ALLOC_FAIR) { if (!zone_local(preferred_zone, zone)) break; - if (zone_is_fair_depleted(zone)) { + if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) { nr_fair_skipped++; continue; } diff --git a/mm/vmscan.c b/mm/vmscan.c index af72fe8e8d74..06123f20a326 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -920,7 +920,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, /* Case 1 above */ if (current_is_kswapd() && PageReclaim(page) && - zone_is_reclaim_writeback(zone)) { + test_bit(ZONE_WRITEBACK, &zone->flags)) { nr_immediate++; goto keep_locked; @@ -1002,7 +1002,7 @@ static unsigned long shrink_page_list(struct list_head *page_list, */ if (page_is_file_cache(page) && (!current_is_kswapd() || - !zone_is_reclaim_dirty(zone))) { + !test_bit(ZONE_DIRTY, &zone->flags))) { /* * Immediately reclaim when written back. * Similar in principal to deactivate_page() @@ -1563,7 +1563,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * are encountered in the nr_immediate check below. */ if (nr_writeback && nr_writeback == nr_taken) - zone_set_flag(zone, ZONE_WRITEBACK); + set_bit(ZONE_WRITEBACK, &zone->flags); /* * memcg will stall in page writeback so only consider forcibly @@ -1575,16 +1575,16 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec, * backed by a congested BDI and wait_iff_congested will stall. */ if (nr_dirty && nr_dirty == nr_congested) - zone_set_flag(zone, ZONE_CONGESTED); + set_bit(ZONE_CONGESTED, &zone->flags); /* * If dirty pages are scanned that are not queued for IO, it * implies that flushers are not keeping up. In this case, flag - * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing - * pages from reclaim context. + * the zone ZONE_DIRTY and kswapd will start writing pages from + * reclaim context. 
*/ if (nr_unqueued_dirty == nr_taken) - zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY); + set_bit(ZONE_DIRTY, &zone->flags); /* * If kswapd scans pages marked marked for immediate @@ -2984,7 +2984,7 @@ static bool kswapd_shrink_zone(struct zone *zone, /* Account for the number of pages attempted to reclaim */ *nr_attempted += sc->nr_to_reclaim; - zone_clear_flag(zone, ZONE_WRITEBACK); + clear_bit(ZONE_WRITEBACK, &zone->flags); /* * If a zone reaches its high watermark, consider it to be no longer @@ -2994,8 +2994,8 @@ static bool kswapd_shrink_zone(struct zone *zone, */ if (zone_reclaimable(zone) && zone_balanced(zone, testorder, 0, classzone_idx)) { - zone_clear_flag(zone, ZONE_CONGESTED); - zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); + clear_bit(ZONE_CONGESTED, &zone->flags); + clear_bit(ZONE_DIRTY, &zone->flags); } return sc->nr_scanned >= sc->nr_to_reclaim; @@ -3086,8 +3086,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, * If balanced, clear the dirty and congested * flags */ - zone_clear_flag(zone, ZONE_CONGESTED); - zone_clear_flag(zone, ZONE_TAIL_LRU_DIRTY); + clear_bit(ZONE_CONGESTED, &zone->flags); + clear_bit(ZONE_DIRTY, &zone->flags); } } @@ -3714,11 +3714,11 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) if (node_state(node_id, N_CPU) && node_id != numa_node_id()) return ZONE_RECLAIM_NOSCAN; - if (zone_test_and_set_flag(zone, ZONE_RECLAIM_LOCKED)) + if (test_and_set_bit(ZONE_RECLAIM_LOCKED, &zone->flags)) return ZONE_RECLAIM_NOSCAN; ret = __zone_reclaim(zone, gfp_mask, order); - zone_clear_flag(zone, ZONE_RECLAIM_LOCKED); + clear_bit(ZONE_RECLAIM_LOCKED, &zone->flags); if (!ret) count_vm_event(PGSCAN_ZONE_RECLAIM_FAILED); -- cgit v1.2.3 From 934f3072c17cc8886f4c043b47eeeb1b12f8de33 Mon Sep 17 00:00:00 2001 From: Junxiao Bi Date: Thu, 9 Oct 2014 15:28:23 -0700 Subject: mm: clear __GFP_FS when PF_MEMALLOC_NOIO is set commit 21caf2fc1931 ("mm: teach mm by current context info to not do I/O during memory allocation") introduces PF_MEMALLOC_NOIO flag to avoid doing I/O inside memory allocation, __GFP_IO is cleared when this flag is set, but __GFP_FS implies __GFP_IO, it should also be cleared. Or it may still run into I/O, like in superblock shrinker. And this will make the kernel run into the deadlock case described in that commit. See Dave Chinner's comment about io in superblock shrinker: Filesystem shrinkers do indeed perform IO from the superblock shrinker and have for years. Even clean inodes can require IO before they can be freed - e.g. on an orphan list, need truncation of post-eof blocks, need to wait for ordered operations to complete before it can be freed, etc. IOWs, Ext4, btrfs and XFS all can issue and/or block on arbitrary amounts of IO in the superblock shrinker context. XFS, in particular, has been doing transactions and IO from the VFS inode cache shrinker since it was first introduced.... Fix this by clearing __GFP_FS in memalloc_noio_flags(), this function has masked all the gfp_mask that will be passed into fs for the processes setting PF_MEMALLOC_NOIO in the direct reclaim path. 
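[editor's note: for quick reference, this is how the helper reads once the fix is applied, reassembled from the include/linux/sched.h hunk below; nothing beyond that hunk is assumed.]

/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
 * __GFP_FS is also cleared as it implies __GFP_IO.
 */
static inline gfp_t memalloc_noio_flags(gfp_t flags)
{
        if (unlikely(current->flags & PF_MEMALLOC_NOIO))
                flags &= ~(__GFP_IO | __GFP_FS);
        return flags;
}

Any allocation that direct reclaim funnels through this helper on behalf of a PF_MEMALLOC_NOIO task can therefore no longer recurse into filesystem code via the superblock shrinker.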
v1 thread at: https://lkml.org/lkml/2014/9/3/32 Signed-off-by: Junxiao Bi Cc: Dave Chinner Cc: joyce.xue Cc: Ming Lei Cc: Trond Myklebust Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/sched.h | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/include/linux/sched.h b/include/linux/sched.h index 9c6353d9e63a..5e63ba59258c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1935,11 +1935,13 @@ extern void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) #define used_math() tsk_used_math(current) -/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags */ +/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags + * __GFP_FS is also cleared as it implies __GFP_IO. + */ static inline gfp_t memalloc_noio_flags(gfp_t flags) { if (unlikely(current->flags & PF_MEMALLOC_NOIO)) - flags &= ~__GFP_IO; + flags &= ~(__GFP_IO | __GFP_FS); return flags; } -- cgit v1.2.3 From 31c9afa6db122a5c7a7843278aaf77dd08ea6e98 Mon Sep 17 00:00:00 2001 From: Sasha Levin Date: Thu, 9 Oct 2014 15:28:37 -0700 Subject: mm: introduce VM_BUG_ON_MM Very similar to VM_BUG_ON_PAGE and VM_BUG_ON_VMA, dump struct_mm when the bug is hit. [akpm@linux-foundation.org: coding-style fixes] [mhocko@suse.cz: fix build] [mhocko@suse.cz: fix build some more] [akpm@linux-foundation.org: do strange things to avoid doing strange things for the comma separators] Signed-off-by: Sasha Levin Cc: Dave Jones Signed-off-by: Michal Hocko Cc: Valdis Kletnieks Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mmdebug.h | 10 +++++++ mm/debug.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 88 insertions(+) (limited to 'include') diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index 569e4c8d0ebb..877ef226f90f 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -5,11 +5,13 @@ struct page; struct vm_area_struct; +struct mm_struct; extern void dump_page(struct page *page, const char *reason); extern void dump_page_badflags(struct page *page, const char *reason, unsigned long badflags); void dump_vma(const struct vm_area_struct *vma); +void dump_mm(const struct mm_struct *mm); #ifdef CONFIG_DEBUG_VM #define VM_BUG_ON(cond) BUG_ON(cond) @@ -27,6 +29,13 @@ void dump_vma(const struct vm_area_struct *vma); BUG(); \ } \ } while (0) +#define VM_BUG_ON_MM(cond, mm) \ + do { \ + if (unlikely(cond)) { \ + dump_mm(mm); \ + BUG(); \ + } \ + } while (0) #define VM_WARN_ON(cond) WARN_ON(cond) #define VM_WARN_ON_ONCE(cond) WARN_ON_ONCE(cond) #define VM_WARN_ONCE(cond, format...) WARN_ONCE(cond, format) @@ -34,6 +43,7 @@ void dump_vma(const struct vm_area_struct *vma); #define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_BUG_ON_PAGE(cond, page) VM_BUG_ON(cond) #define VM_BUG_ON_VMA(cond, vma) VM_BUG_ON(cond) +#define VM_BUG_ON_MM(cond, mm) VM_BUG_ON(cond) #define VM_WARN_ON(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE(cond) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/mm/debug.c b/mm/debug.c index 697df9050193..5a1b6194089c 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -1,3 +1,10 @@ +/* + * mm/debug.c + * + * mm/ specific debug routines. 
+ * + */ + #include #include #include @@ -159,4 +166,75 @@ void dump_vma(const struct vm_area_struct *vma) } EXPORT_SYMBOL(dump_vma); +void dump_mm(const struct mm_struct *mm) +{ + printk(KERN_ALERT + "mm %p mmap %p seqnum %d task_size %lu\n" +#ifdef CONFIG_MMU + "get_unmapped_area %p\n" +#endif + "mmap_base %lu mmap_legacy_base %lu highest_vm_end %lu\n" + "pgd %p mm_users %d mm_count %d nr_ptes %lu map_count %d\n" + "hiwater_rss %lx hiwater_vm %lx total_vm %lx locked_vm %lx\n" + "pinned_vm %lx shared_vm %lx exec_vm %lx stack_vm %lx\n" + "start_code %lx end_code %lx start_data %lx end_data %lx\n" + "start_brk %lx brk %lx start_stack %lx\n" + "arg_start %lx arg_end %lx env_start %lx env_end %lx\n" + "binfmt %p flags %lx core_state %p\n" +#ifdef CONFIG_AIO + "ioctx_table %p\n" +#endif +#ifdef CONFIG_MEMCG + "owner %p " +#endif + "exe_file %p\n" +#ifdef CONFIG_MMU_NOTIFIER + "mmu_notifier_mm %p\n" +#endif +#ifdef CONFIG_NUMA_BALANCING + "numa_next_scan %lu numa_scan_offset %lu numa_scan_seq %d\n" +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + "tlb_flush_pending %d\n" +#endif + "%s", /* This is here to hold the comma */ + + mm, mm->mmap, mm->vmacache_seqnum, mm->task_size, +#ifdef CONFIG_MMU + mm->get_unmapped_area, +#endif + mm->mmap_base, mm->mmap_legacy_base, mm->highest_vm_end, + mm->pgd, atomic_read(&mm->mm_users), + atomic_read(&mm->mm_count), + atomic_long_read((atomic_long_t *)&mm->nr_ptes), + mm->map_count, + mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm, + mm->pinned_vm, mm->shared_vm, mm->exec_vm, mm->stack_vm, + mm->start_code, mm->end_code, mm->start_data, mm->end_data, + mm->start_brk, mm->brk, mm->start_stack, + mm->arg_start, mm->arg_end, mm->env_start, mm->env_end, + mm->binfmt, mm->flags, mm->core_state, +#ifdef CONFIG_AIO + mm->ioctx_table, +#endif +#ifdef CONFIG_MEMCG + mm->owner, +#endif + mm->exe_file, +#ifdef CONFIG_MMU_NOTIFIER + mm->mmu_notifier_mm, +#endif +#ifdef CONFIG_NUMA_BALANCING + mm->numa_next_scan, mm->numa_scan_offset, mm->numa_scan_seq, +#endif +#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION) + mm->tlb_flush_pending, +#endif + "" /* This is here to not have a comma! */ + ); + + dump_flags(mm->def_flags, vmaflags_names, + ARRAY_SIZE(vmaflags_names)); +} + #endif /* CONFIG_DEBUG_VM */ -- cgit v1.2.3 From 33a690c45b202e4c6483bfd1d93ad8d0f51df2ca Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:43 -0700 Subject: memcg: move memcg_{alloc,free}_cache_params to slab_common.c The only reason why they live in memcontrol.c is that we get/put css reference to the owner memory cgroup in them. However, we can do that in memcg_{un,}register_cache. OTOH, there are several reasons to move them to slab_common.c. First, I think that the less public interface functions we have in memcontrol.h the better. Since the functions I move don't depend on memcontrol, I think it's worth making them private to slab, especially taking into account that the arrays are defined on the slab's side too. Second, the way how per-memcg arrays are updated looks rather awkward: it proceeds from memcontrol.c (__memcg_activate_kmem) to slab_common.c (memcg_update_all_caches) and back to memcontrol.c again (memcg_update_array_size). In the following patches I move the function relocating the arrays (memcg_update_array_size) to slab_common.c and therefore get rid this circular call path. 
I think we should have the cache allocation stuff in the same place where we have relocation, because it's easier to follow the code then. So I move arrays alloc/free functions to slab_common.c too. The third point isn't obvious. I'm going to make the list_lru structure per-memcg to allow targeted kmem reclaim. That means we will have per-memcg arrays in list_lrus too. It turns out that it's much easier to update these arrays in list_lru.c rather than in memcontrol.c, because all the stuff we need is defined there. This patch makes memcg caches arrays allocation path conform that of the upcoming list_lru. So let's move these functions to slab_common.c and make them static. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Christoph Lameter Cc: Glauber Costa Cc: Joonsoo Kim Cc: David Rientjes Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 14 -------------- mm/memcontrol.c | 41 ++++------------------------------------- mm/slab_common.c | 44 +++++++++++++++++++++++++++++++++++++++++++- 3 files changed, 47 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e0752d204d9e..4d17242eeff7 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,10 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache); -void memcg_free_cache_params(struct kmem_cache *s); - int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); @@ -574,16 +570,6 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg) return -1; } -static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, - struct kmem_cache *s, struct kmem_cache *root_cache) -{ - return 0; -} - -static inline void memcg_free_cache_params(struct kmem_cache *s) -{ -} - static inline struct kmem_cache * memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp) { diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 28928ce9b07f..865e87c014d6 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2984,43 +2984,6 @@ int memcg_update_cache_size(struct kmem_cache *s, int num_groups) return 0; } -int memcg_alloc_cache_params(struct mem_cgroup *memcg, struct kmem_cache *s, - struct kmem_cache *root_cache) -{ - size_t size; - - if (!memcg_kmem_enabled()) - return 0; - - if (!memcg) { - size = offsetof(struct memcg_cache_params, memcg_caches); - size += memcg_limited_groups_array_size * sizeof(void *); - } else - size = sizeof(struct memcg_cache_params); - - s->memcg_params = kzalloc(size, GFP_KERNEL); - if (!s->memcg_params) - return -ENOMEM; - - if (memcg) { - s->memcg_params->memcg = memcg; - s->memcg_params->root_cache = root_cache; - css_get(&memcg->css); - } else - s->memcg_params->is_root_cache = true; - - return 0; -} - -void memcg_free_cache_params(struct kmem_cache *s) -{ - if (!s->memcg_params) - return; - if (!s->memcg_params->is_root_cache) - css_put(&s->memcg_params->memcg->css); - kfree(s->memcg_params); -} - static void memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { @@ -3051,6 +3014,7 @@ static void memcg_register_cache(struct mem_cgroup *memcg, if (!cachep) return; + css_get(&memcg->css); list_add(&cachep->memcg_params->list, &memcg->memcg_slab_caches); /* @@ -3084,6 +3048,9 @@ static 
void memcg_unregister_cache(struct kmem_cache *cachep) list_del(&cachep->memcg_params->list); kmem_cache_destroy(cachep); + + /* drop the reference taken in memcg_register_cache */ + css_put(&memcg->css); } /* diff --git a/mm/slab_common.c b/mm/slab_common.c index f206cb10a544..c2a8661f8b81 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -116,6 +116,38 @@ static inline int kmem_cache_sanity_check(const char *name, size_t size) #endif #ifdef CONFIG_MEMCG_KMEM +static int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) +{ + size_t size; + + if (!memcg_kmem_enabled()) + return 0; + + if (!memcg) { + size = offsetof(struct memcg_cache_params, memcg_caches); + size += memcg_limited_groups_array_size * sizeof(void *); + } else + size = sizeof(struct memcg_cache_params); + + s->memcg_params = kzalloc(size, GFP_KERNEL); + if (!s->memcg_params) + return -ENOMEM; + + if (memcg) { + s->memcg_params->memcg = memcg; + s->memcg_params->root_cache = root_cache; + } else + s->memcg_params->is_root_cache = true; + + return 0; +} + +static void memcg_free_cache_params(struct kmem_cache *s) +{ + kfree(s->memcg_params); +} + int memcg_update_all_caches(int num_memcgs) { struct kmem_cache *s; @@ -141,7 +173,17 @@ out: mutex_unlock(&slab_mutex); return ret; } -#endif +#else +static inline int memcg_alloc_cache_params(struct mem_cgroup *memcg, + struct kmem_cache *s, struct kmem_cache *root_cache) +{ + return 0; +} + +static inline void memcg_free_cache_params(struct kmem_cache *s) +{ +} +#endif /* CONFIG_MEMCG_KMEM */ /* * Find a mergeable slab cache -- cgit v1.2.3 From 6f817f4cda68b09621312ec5ba84217bc5e37b3d Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Thu, 9 Oct 2014 15:28:47 -0700 Subject: memcg: move memcg_update_cache_size() to slab_common.c `While growing per memcg caches arrays, we jump between memcontrol.c and slab_common.c in a weird way: memcg_alloc_cache_id - memcontrol.c memcg_update_all_caches - slab_common.c memcg_update_cache_size - memcontrol.c There's absolutely no reason why memcg_update_cache_size can't live on the slab's side though. So let's move it there and settle it comfortably amid per-memcg cache allocation functions. Besides, this patch cleans this function up a bit, removing all the useless comments from it, and renames it to memcg_update_cache_params to conform to memcg_alloc/free_cache_params, which we already have in slab_common.c. 
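[editor's note: a condensed view of the relocated helper as it ends up after this patch, taken from the mm/slab_common.c hunk below; the call-chain comment only restates the path quoted above, nothing new is assumed.]

/*
 * Grow path after the move -- it no longer bounces back into memcontrol.c:
 *
 *   memcg_alloc_cache_id()              mm/memcontrol.c
 *     -> memcg_update_all_caches()      mm/slab_common.c
 *        -> memcg_update_cache_params() mm/slab_common.c
 *           (was memcg_update_cache_size() in mm/memcontrol.c)
 */
static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs)
{
        int size;
        struct memcg_cache_params *new_params, *cur_params;

        BUG_ON(!is_root_cache(s));

        /* The per-memcg pointer array trails the fixed part of the struct. */
        size = offsetof(struct memcg_cache_params, memcg_caches);
        size += num_memcgs * sizeof(void *);

        new_params = kzalloc(size, GFP_KERNEL);
        if (!new_params)
                return -ENOMEM;

        cur_params = s->memcg_params;
        memcpy(new_params->memcg_caches, cur_params->memcg_caches,
               memcg_limited_groups_array_size * sizeof(void *));

        new_params->is_root_cache = true;

        /* Publish the larger array; readers may still walk the old one
         * under RCU, so its free is deferred. */
        rcu_assign_pointer(s->memcg_params, new_params);
        if (cur_params)
                kfree_rcu(cur_params, rcu_head);

        return 0;
}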
Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Cc: Christoph Lameter Cc: Glauber Costa Cc: Joonsoo Kim Cc: David Rientjes Cc: Pekka Enberg Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 1 - mm/memcontrol.c | 49 ---------------------------------------------- mm/slab_common.c | 30 ++++++++++++++++++++++++++-- 3 files changed, 28 insertions(+), 52 deletions(-) (limited to 'include') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 4d17242eeff7..19df5d857411 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -440,7 +440,6 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order); int memcg_cache_id(struct mem_cgroup *memcg); -int memcg_update_cache_size(struct kmem_cache *s, int num_groups); void memcg_update_array_size(int num_groups); struct kmem_cache * diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ef4fbc5e4ca3..fff511e25bb2 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2958,55 +2958,6 @@ void memcg_update_array_size(int num) memcg_limited_groups_array_size = num; } -int memcg_update_cache_size(struct kmem_cache *s, int num_groups) -{ - struct memcg_cache_params *cur_params = s->memcg_params; - struct memcg_cache_params *new_params; - size_t size; - int i; - - VM_BUG_ON(!is_root_cache(s)); - - size = num_groups * sizeof(void *); - size += offsetof(struct memcg_cache_params, memcg_caches); - - new_params = kzalloc(size, GFP_KERNEL); - if (!new_params) - return -ENOMEM; - - new_params->is_root_cache = true; - - /* - * There is the chance it will be bigger than - * memcg_limited_groups_array_size, if we failed an allocation - * in a cache, in which case all caches updated before it, will - * have a bigger array. - * - * But if that is the case, the data after - * memcg_limited_groups_array_size is certainly unused - */ - for (i = 0; i < memcg_limited_groups_array_size; i++) { - if (!cur_params->memcg_caches[i]) - continue; - new_params->memcg_caches[i] = - cur_params->memcg_caches[i]; - } - - /* - * Ideally, we would wait until all caches succeed, and only - * then free the old one. But this is not worth the extra - * pointer per-cache we'd have to have for this. - * - * It is not a big deal if some caches are left with a size - * bigger than the others. And all updates will reset this - * anyway. 
- */ - rcu_assign_pointer(s->memcg_params, new_params); - if (cur_params) - kfree_rcu(cur_params, rcu_head); - return 0; -} - static void memcg_register_cache(struct mem_cgroup *memcg, struct kmem_cache *root_cache) { diff --git a/mm/slab_common.c b/mm/slab_common.c index c2a8661f8b81..3a6e0cfdf03a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -148,6 +148,33 @@ static void memcg_free_cache_params(struct kmem_cache *s) kfree(s->memcg_params); } +static int memcg_update_cache_params(struct kmem_cache *s, int num_memcgs) +{ + int size; + struct memcg_cache_params *new_params, *cur_params; + + BUG_ON(!is_root_cache(s)); + + size = offsetof(struct memcg_cache_params, memcg_caches); + size += num_memcgs * sizeof(void *); + + new_params = kzalloc(size, GFP_KERNEL); + if (!new_params) + return -ENOMEM; + + cur_params = s->memcg_params; + memcpy(new_params->memcg_caches, cur_params->memcg_caches, + memcg_limited_groups_array_size * sizeof(void *)); + + new_params->is_root_cache = true; + + rcu_assign_pointer(s->memcg_params, new_params); + if (cur_params) + kfree_rcu(cur_params, rcu_head); + + return 0; +} + int memcg_update_all_caches(int num_memcgs) { struct kmem_cache *s; @@ -158,9 +185,8 @@ int memcg_update_all_caches(int num_memcgs) if (!is_root_cache(s)) continue; - ret = memcg_update_cache_size(s, num_memcgs); + ret = memcg_update_cache_params(s, num_memcgs); /* - * See comment in memcontrol.c, memcg_update_cache_size: * Instead of freeing the memory, we'll just leave the caches * up to this point in an updated state. */ -- cgit v1.2.3 From b70a2a21dc9d4ad455931b53131a0cb4fc01fafe Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Thu, 9 Oct 2014 15:28:56 -0700 Subject: mm: memcontrol: fix transparent huge page allocations under pressure In a memcg with even just moderate cache pressure, success rates for transparent huge page allocations drop to zero, wasting a lot of effort that the allocator puts into assembling these pages. The reason for this is that the memcg reclaim code was never designed for higher-order charges. It reclaims in small batches until there is room for at least one page. Huge page charges only succeed when these batches add up over a series of huge faults, which is unlikely under any significant load involving order-0 allocations in the group. Remove that loop on the memcg side in favor of passing the actual reclaim goal to direct reclaim, which is already set up and optimized to meet higher-order goals efficiently. This brings memcg's THP policy in line with the system policy: if the allocator painstakingly assembles a hugepage, memcg will at least make an honest effort to charge it. As a result, transparent hugepage allocation rates amid cache activity are drastically improved: vanilla patched pgalloc 4717530.80 ( +0.00%) 4451376.40 ( -5.64%) pgfault 491370.60 ( +0.00%) 225477.40 ( -54.11%) pgmajfault 2.00 ( +0.00%) 1.80 ( -6.67%) thp_fault_alloc 0.00 ( +0.00%) 531.60 (+100.00%) thp_fault_fallback 749.00 ( +0.00%) 217.40 ( -70.88%) [ Note: this may in turn increase memory consumption from internal fragmentation, which is an inherent risk of transparent hugepages. Some setups may have to adjust the memcg limits accordingly to accomodate this - or, if the machine is already packed to capacity, disable the transparent huge page feature. 
] Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Cc: Michal Hocko Cc: Dave Hansen Cc: Greg Thelen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/swap.h | 6 +++-- mm/memcontrol.c | 69 +++++++++++++--------------------------------------- mm/vmscan.c | 7 +++--- 3 files changed, 25 insertions(+), 57 deletions(-) (limited to 'include') diff --git a/include/linux/swap.h b/include/linux/swap.h index ea4f926e6b9b..37a585beef5c 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -327,8 +327,10 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask); extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); -extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, - gfp_t gfp_mask, bool noswap); +extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, + gfp_t gfp_mask, + bool may_swap); extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, gfp_t gfp_mask, bool noswap, struct zone *zone, diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9cda99dfac4f..c86cc442ada4 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -480,14 +480,6 @@ enum res_type { /* Used for OOM nofiier */ #define OOM_CONTROL (0) -/* - * Reclaim flags for mem_cgroup_hierarchical_reclaim - */ -#define MEM_CGROUP_RECLAIM_NOSWAP_BIT 0x0 -#define MEM_CGROUP_RECLAIM_NOSWAP (1 << MEM_CGROUP_RECLAIM_NOSWAP_BIT) -#define MEM_CGROUP_RECLAIM_SHRINK_BIT 0x1 -#define MEM_CGROUP_RECLAIM_SHRINK (1 << MEM_CGROUP_RECLAIM_SHRINK_BIT) - /* * The memcg_create_mutex will be held whenever a new cgroup is created. * As a consequence, any change that needs to protect against new child cgroups @@ -1805,40 +1797,6 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, NULL, "Memory cgroup out of memory"); } -static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg, - gfp_t gfp_mask, - unsigned long flags) -{ - unsigned long total = 0; - bool noswap = false; - int loop; - - if (flags & MEM_CGROUP_RECLAIM_NOSWAP) - noswap = true; - - for (loop = 0; loop < MEM_CGROUP_MAX_RECLAIM_LOOPS; loop++) { - if (loop) - drain_all_stock_async(memcg); - total += try_to_free_mem_cgroup_pages(memcg, gfp_mask, noswap); - /* - * Allow limit shrinkers, which are triggered directly - * by userspace, to catch signals and stop reclaim - * after minimal progress, regardless of the margin. - */ - if (total && (flags & MEM_CGROUP_RECLAIM_SHRINK)) - break; - if (mem_cgroup_margin(memcg)) - break; - /* - * If nothing was reclaimed after two attempts, there - * may be no reclaimable pages in this hierarchy. 
- */ - if (loop && !total) - break; - } - return total; -} - /** * test_mem_cgroup_node_reclaimable * @memcg: the target memcg @@ -2541,8 +2499,9 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, struct mem_cgroup *mem_over_limit; struct res_counter *fail_res; unsigned long nr_reclaimed; - unsigned long flags = 0; unsigned long long size; + bool may_swap = true; + bool drained = false; int ret = 0; if (mem_cgroup_is_root(memcg)) @@ -2561,7 +2520,7 @@ retry: mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); } else { mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); - flags |= MEM_CGROUP_RECLAIM_NOSWAP; + may_swap = false; } if (batch > nr_pages) { @@ -2586,11 +2545,18 @@ retry: if (!(gfp_mask & __GFP_WAIT)) goto nomem; - nr_reclaimed = mem_cgroup_reclaim(mem_over_limit, gfp_mask, flags); + nr_reclaimed = try_to_free_mem_cgroup_pages(mem_over_limit, nr_pages, + gfp_mask, may_swap); if (mem_cgroup_margin(mem_over_limit) >= nr_pages) goto retry; + if (!drained) { + drain_all_stock_async(mem_over_limit); + drained = true; + goto retry; + } + if (gfp_mask & __GFP_NORETRY) goto nomem; /* @@ -3666,8 +3632,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, if (!ret) break; - mem_cgroup_reclaim(memcg, GFP_KERNEL, - MEM_CGROUP_RECLAIM_SHRINK); + try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); + curusage = res_counter_read_u64(&memcg->res, RES_USAGE); /* Usage is reduced ? */ if (curusage >= oldusage) @@ -3717,9 +3683,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, if (!ret) break; - mem_cgroup_reclaim(memcg, GFP_KERNEL, - MEM_CGROUP_RECLAIM_NOSWAP | - MEM_CGROUP_RECLAIM_SHRINK); + try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); + curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); /* Usage is reduced ? */ if (curusage >= oldusage) @@ -3968,8 +3933,8 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg) if (signal_pending(current)) return -EINTR; - progress = try_to_free_mem_cgroup_pages(memcg, GFP_KERNEL, - false); + progress = try_to_free_mem_cgroup_pages(memcg, 1, + GFP_KERNEL, true); if (!progress) { nr_retries--; /* maybe some writeback is necessary */ diff --git a/mm/vmscan.c b/mm/vmscan.c index 06123f20a326..dcb47074ae03 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2759,21 +2759,22 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, } unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, + unsigned long nr_pages, gfp_t gfp_mask, - bool noswap) + bool may_swap) { struct zonelist *zonelist; unsigned long nr_reclaimed; int nid; struct scan_control sc = { - .nr_to_reclaim = SWAP_CLUSTER_MAX, + .nr_to_reclaim = max(nr_pages, SWAP_CLUSTER_MAX), .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK), .target_mem_cgroup = memcg, .priority = DEF_PRIORITY, .may_writepage = !laptop_mode, .may_unmap = 1, - .may_swap = !noswap, + .may_swap = may_swap, }; /* -- cgit v1.2.3 From d6d86c0a7f8ddc5b38cf089222cb1d9540762dc2 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:27 -0700 Subject: mm/balloon_compaction: redesign ballooned pages management Sasha Levin reported KASAN splash inside isolate_migratepages_range(). Problem is in the function __is_movable_balloon_page() which tests AS_BALLOON_MAP in page->mapping->flags. This function has no protection against anonymous pages. As result it tried to check address space flags inside struct anon_vma. 
Further investigation shows more problems in current implementation: * Special branch in __unmap_and_move() never works: balloon_page_movable() checks page flags and page_count. In __unmap_and_move() page is locked, reference counter is elevated, thus balloon_page_movable() always fails. As a result execution goes to the normal migration path. virtballoon_migratepage() returns MIGRATEPAGE_BALLOON_SUCCESS instead of MIGRATEPAGE_SUCCESS, move_to_new_page() thinks this is an error code and assigns newpage->mapping to NULL. Newly migrated page lose connectivity with balloon an all ability for further migration. * lru_lock erroneously required in isolate_migratepages_range() for isolation ballooned page. This function releases lru_lock periodically, this makes migration mostly impossible for some pages. * balloon_page_dequeue have a tight race with balloon_page_isolate: balloon_page_isolate could be executed in parallel with dequeue between picking page from list and locking page_lock. Race is rare because they use trylock_page() for locking. This patch fixes all of them. Instead of fake mapping with special flag this patch uses special state of page->_mapcount: PAGE_BALLOON_MAPCOUNT_VALUE = -256. Buddy allocator uses PAGE_BUDDY_MAPCOUNT_VALUE = -128 for similar purpose. Storing mark directly in struct page makes everything safer and easier. PagePrivate is used to mark pages present in page list (i.e. not isolated, like PageLRU for normal pages). It replaces special rules for reference counter and makes balloon migration similar to migration of normal pages. This flag is protected by page_lock together with link to the balloon device. Signed-off-by: Konstantin Khlebnikov Reported-by: Sasha Levin Link: http://lkml.kernel.org/p/53E6CEAA.9020105@oracle.com Cc: Rafael Aquini Cc: Andrey Ryabinin Cc: [3.8+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_balloon.c | 15 +++--- include/linux/balloon_compaction.h | 97 ++++++++++---------------------------- include/linux/migrate.h | 11 +---- include/linux/mm.h | 19 ++++++++ mm/balloon_compaction.c | 26 +++++----- mm/compaction.c | 2 +- mm/migrate.c | 16 ++----- 7 files changed, 68 insertions(+), 118 deletions(-) (limited to 'include') diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 25ebe8eecdb7..c3eb93fc9261 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -163,8 +163,8 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) /* Find pfns pointing at start of each page, get pages and free them. 
*/ for (i = 0; i < num; i += VIRTIO_BALLOON_PAGES_PER_PAGE) { struct page *page = balloon_pfn_to_page(pfns[i]); - balloon_page_free(page); adjust_managed_page_count(page, 1); + put_page(page); /* balloon reference */ } } @@ -395,6 +395,8 @@ static int virtballoon_migratepage(struct address_space *mapping, if (!mutex_trylock(&vb->balloon_lock)) return -EAGAIN; + get_page(newpage); /* balloon reference */ + /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(newpage, mapping, &vb_dev_info->pages); @@ -404,12 +406,7 @@ static int virtballoon_migratepage(struct address_space *mapping, set_page_pfns(vb->pfns, newpage); tell_host(vb, vb->inflate_vq); - /* - * balloon's page migration 2nd step -- deflate "page" - * - * It's safe to delete page->lru here because this page is at - * an isolated migration list, and this step is expected to happen here - */ + /* balloon's page migration 2nd step -- deflate "page" */ balloon_page_delete(page); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, page); @@ -417,7 +414,9 @@ static int virtballoon_migratepage(struct address_space *mapping, mutex_unlock(&vb->balloon_lock); - return MIGRATEPAGE_BALLOON_SUCCESS; + put_page(page); /* balloon reference */ + + return MIGRATEPAGE_SUCCESS; } /* define the balloon_mapping->a_ops callback to allow balloon page migration */ diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 089743ade734..38aa07d5b81c 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -27,10 +27,13 @@ * counter raised only while it is under our special handling; * * iii. after the lockless scan step have selected a potential balloon page for - * isolation, re-test the page->mapping flags and the page ref counter + * isolation, re-test the PageBalloon mark and the PagePrivate flag * under the proper page lock, to ensure isolating a valid balloon page * (not yet isolated, nor under release procedure) * + * iv. isolation or dequeueing procedure must clear PagePrivate flag under + * page lock together with removing page from balloon device page list. + * * The functions provided by this interface are placed to help on coping with * the aforementioned balloon page corner case, as well as to ensure the simple * set of exposed rules are satisfied while we are dealing with balloon pages @@ -71,28 +74,6 @@ static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) kfree(b_dev_info); } -/* - * balloon_page_free - release a balloon page back to the page free lists - * @page: ballooned page to be set free - * - * This function must be used to properly set free an isolated/dequeued balloon - * page at the end of a sucessful page migration, or at the balloon driver's - * page release procedure. - */ -static inline void balloon_page_free(struct page *page) -{ - /* - * Balloon pages always get an extra refcount before being isolated - * and before being dequeued to help on sorting out fortuite colisions - * between a thread attempting to isolate and another thread attempting - * to release the very same balloon page. - * - * Before we handle the page back to Buddy, lets drop its extra refcnt. 
- */ - put_page(page); - __free_page(page); -} - #ifdef CONFIG_BALLOON_COMPACTION extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); @@ -108,74 +89,33 @@ static inline void balloon_mapping_free(struct address_space *balloon_mapping) } /* - * page_flags_cleared - helper to perform balloon @page ->flags tests. - * - * As balloon pages are obtained from buddy and we do not play with page->flags - * at driver level (exception made when we get the page lock for compaction), - * we can safely identify a ballooned page by checking if the - * PAGE_FLAGS_CHECK_AT_PREP page->flags are all cleared. This approach also - * helps us skip ballooned pages that are locked for compaction or release, thus - * mitigating their racy check at balloon_page_movable() - */ -static inline bool page_flags_cleared(struct page *page) -{ - return !(page->flags & PAGE_FLAGS_CHECK_AT_PREP); -} - -/* - * __is_movable_balloon_page - helper to perform @page mapping->flags tests + * __is_movable_balloon_page - helper to perform @page PageBalloon tests */ static inline bool __is_movable_balloon_page(struct page *page) { - struct address_space *mapping = page->mapping; - return mapping_balloon(mapping); + return PageBalloon(page); } /* - * balloon_page_movable - test page->mapping->flags to identify balloon pages - * that can be moved by compaction/migration. - * - * This function is used at core compaction's page isolation scheme, therefore - * most pages exposed to it are not enlisted as balloon pages and so, to avoid - * undesired side effects like racing against __free_pages(), we cannot afford - * holding the page locked while testing page->mapping->flags here. + * balloon_page_movable - test PageBalloon to identify balloon pages + * and PagePrivate to check that the page is not + * isolated and can be moved by compaction/migration. * * As we might return false positives in the case of a balloon page being just - * released under us, the page->mapping->flags need to be re-tested later, - * under the proper page lock, at the functions that will be coping with the - * balloon page case. + * released under us, this need to be re-tested later, under the page lock. */ static inline bool balloon_page_movable(struct page *page) { - /* - * Before dereferencing and testing mapping->flags, let's make sure - * this is not a page that uses ->mapping in a different way - */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) == 1) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page) && PagePrivate(page); } /* * isolated_balloon_page - identify an isolated balloon page on private * compaction/migration page lists. - * - * After a compaction thread isolates a balloon page for migration, it raises - * the page refcount to prevent concurrent compaction threads from re-isolating - * the same page. For that reason putback_movable_pages(), or other routines - * that need to identify isolated balloon pages on private pagelists, cannot - * rely on balloon_page_movable() to accomplish the task. 
*/ static inline bool isolated_balloon_page(struct page *page) { - /* Already isolated balloon pages, by default, have a raised refcount */ - if (page_flags_cleared(page) && !page_mapped(page) && - page_count(page) >= 2) - return __is_movable_balloon_page(page); - - return false; + return PageBalloon(page); } /* @@ -192,6 +132,8 @@ static inline void balloon_page_insert(struct page *page, struct address_space *mapping, struct list_head *head) { + __SetPageBalloon(page); + SetPagePrivate(page); page->mapping = mapping; list_add(&page->lru, head); } @@ -206,8 +148,12 @@ static inline void balloon_page_insert(struct page *page, */ static inline void balloon_page_delete(struct page *page) { + __ClearPageBalloon(page); page->mapping = NULL; - list_del(&page->lru); + if (PagePrivate(page)) { + ClearPagePrivate(page); + list_del(&page->lru); + } } /* @@ -258,6 +204,11 @@ static inline void balloon_page_delete(struct page *page) list_del(&page->lru); } +static inline bool __is_movable_balloon_page(struct page *page) +{ + return false; +} + static inline bool balloon_page_movable(struct page *page) { return false; diff --git a/include/linux/migrate.h b/include/linux/migrate.h index b66fd10f4b93..01aad3ed89ec 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -13,18 +13,9 @@ typedef void free_page_t(struct page *page, unsigned long private); * Return values from addresss_space_operations.migratepage(): * - negative errno on page migration failure; * - zero on page migration success; - * - * The balloon page migration introduces this special case where a 'distinct' - * return code is used to flag a successful page migration to unmap_and_move(). - * This approach is necessary because page migration can race against balloon - * deflation procedure, and for such case we could introduce a nasty page leak - * if a successfully migrated balloon page gets released concurrently with - * migration's unmap_and_move() wrap-up steps. */ #define MIGRATEPAGE_SUCCESS 0 -#define MIGRATEPAGE_BALLOON_SUCCESS 1 /* special ret code for balloon page - * sucessful migration case. - */ + enum migrate_reason { MR_COMPACTION, MR_MEMORY_FAILURE, diff --git a/include/linux/mm.h b/include/linux/mm.h index 4d814aa97785..fa0d74e06428 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -554,6 +554,25 @@ static inline void __ClearPageBuddy(struct page *page) atomic_set(&page->_mapcount, -1); } +#define PAGE_BALLOON_MAPCOUNT_VALUE (-256) + +static inline int PageBalloon(struct page *page) +{ + return atomic_read(&page->_mapcount) == PAGE_BALLOON_MAPCOUNT_VALUE; +} + +static inline void __SetPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(atomic_read(&page->_mapcount) != -1, page); + atomic_set(&page->_mapcount, PAGE_BALLOON_MAPCOUNT_VALUE); +} + +static inline void __ClearPageBalloon(struct page *page) +{ + VM_BUG_ON_PAGE(!PageBalloon(page), page); + atomic_set(&page->_mapcount, -1); +} + void put_page(struct page *page); void put_pages_list(struct list_head *pages); diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 6e45a5074bf0..52abeeb3cb9d 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -93,17 +93,12 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) * to be released by the balloon driver. 
*/ if (trylock_page(page)) { + if (!PagePrivate(page)) { + /* raced with isolation */ + unlock_page(page); + continue; + } spin_lock_irqsave(&b_dev_info->pages_lock, flags); - /* - * Raise the page refcount here to prevent any wrong - * attempt to isolate this page, in case of coliding - * with balloon_page_isolate() just after we release - * the page lock. - * - * balloon_page_free() will take care of dropping - * this extra refcount later. - */ - get_page(page); balloon_page_delete(page); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); @@ -187,7 +182,9 @@ static inline void __isolate_balloon_page(struct page *page) { struct balloon_dev_info *b_dev_info = page->mapping->private_data; unsigned long flags; + spin_lock_irqsave(&b_dev_info->pages_lock, flags); + ClearPagePrivate(page); list_del(&page->lru); b_dev_info->isolated_pages++; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -197,7 +194,9 @@ static inline void __putback_balloon_page(struct page *page) { struct balloon_dev_info *b_dev_info = page->mapping->private_data; unsigned long flags; + spin_lock_irqsave(&b_dev_info->pages_lock, flags); + SetPagePrivate(page); list_add(&page->lru, &b_dev_info->pages); b_dev_info->isolated_pages--; spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); @@ -235,12 +234,11 @@ bool balloon_page_isolate(struct page *page) */ if (likely(trylock_page(page))) { /* - * A ballooned page, by default, has just one refcount. + * A ballooned page, by default, has PagePrivate set. * Prevent concurrent compaction threads from isolating - * an already isolated balloon page by refcount check. + * an already isolated balloon page by clearing it. */ - if (__is_movable_balloon_page(page) && - page_count(page) == 2) { + if (balloon_page_movable(page)) { __isolate_balloon_page(page); unlock_page(page); return true; diff --git a/mm/compaction.c b/mm/compaction.c index b9972c0fd917..edba18aed173 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -640,7 +640,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, */ if (!PageLRU(page)) { if (unlikely(balloon_page_movable(page))) { - if (locked && balloon_page_isolate(page)) { + if (balloon_page_isolate(page)) { /* Successfully isolated */ goto isolate_success; } diff --git a/mm/migrate.c b/mm/migrate.c index 2740360cd216..01439953abf5 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -876,7 +876,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage, } } - if (unlikely(balloon_page_movable(page))) { + if (unlikely(isolated_balloon_page(page))) { /* * A ballooned page does not need any special attention from * physical to virtual reverse mapping procedures. @@ -955,17 +955,6 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page, rc = __unmap_and_move(page, newpage, force, mode); - if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { - /* - * A ballooned page has been migrated already. - * Now, it's the time to wrap-up counters, - * handle the page back to Buddy and return. 
- */ - dec_zone_page_state(page, NR_ISOLATED_ANON + - page_is_file_cache(page)); - balloon_page_free(page); - return MIGRATEPAGE_SUCCESS; - } out: if (rc != -EAGAIN) { /* @@ -988,6 +977,9 @@ out: if (rc != MIGRATEPAGE_SUCCESS && put_new_page) { ClearPageSwapBacked(newpage); put_new_page(newpage, private); + } else if (unlikely(__is_movable_balloon_page(newpage))) { + /* drop our reference, page already in the balloon */ + put_page(newpage); } else putback_lru_page(newpage); -- cgit v1.2.3 From 9d1ba8056474a208ed9efb7e58cd014795d9f818 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:29 -0700 Subject: mm/balloon_compaction: remove balloon mapping and flag AS_BALLOON_MAP Now ballooned pages are detected using PageBalloon(). Fake mapping is no longer required. This patch links ballooned pages to balloon device using field page->private instead of page->mapping. Also this patch embeds balloon_dev_info directly into struct virtio_balloon. Signed-off-by: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/virtio_balloon.c | 60 ++++++------------------ include/linux/balloon_compaction.h | 72 ++++++++--------------------- include/linux/pagemap.h | 18 +------- mm/balloon_compaction.c | 95 +++----------------------------------- 4 files changed, 39 insertions(+), 206 deletions(-) (limited to 'include') diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index c3eb93fc9261..2bad7f9dd2ac 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -59,7 +59,7 @@ struct virtio_balloon * Each page on this list adds VIRTIO_BALLOON_PAGES_PER_PAGE * to num_pages above. */ - struct balloon_dev_info *vb_dev_info; + struct balloon_dev_info vb_dev_info; /* Synchronize access/update to this struct virtio_balloon elements */ struct mutex balloon_lock; @@ -127,7 +127,7 @@ static void set_page_pfns(u32 pfns[], struct page *page) static void fill_balloon(struct virtio_balloon *vb, size_t num) { - struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; + struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); @@ -171,7 +171,7 @@ static void release_pages_by_pfn(const u32 pfns[], unsigned int num) static void leak_balloon(struct virtio_balloon *vb, size_t num) { struct page *page; - struct balloon_dev_info *vb_dev_info = vb->vb_dev_info; + struct balloon_dev_info *vb_dev_info = &vb->vb_dev_info; /* We can only do one array worth at a time. */ num = min(num, ARRAY_SIZE(vb->pfns)); @@ -353,12 +353,11 @@ static int init_vqs(struct virtio_balloon *vb) return 0; } -static const struct address_space_operations virtio_balloon_aops; #ifdef CONFIG_BALLOON_COMPACTION /* * virtballoon_migratepage - perform the balloon page migration on behalf of * a compation thread. (called under page lock) - * @mapping: the page->mapping which will be assigned to the new migrated page. + * @vb_dev_info: the balloon device * @newpage: page that will replace the isolated page after migration finishes. * @page : the isolated (old) page that is about to be migrated to newpage. * @mode : compaction mode -- not used for balloon page migration. @@ -373,17 +372,13 @@ static const struct address_space_operations virtio_balloon_aops; * This function preforms the balloon page migration task. 
* Called through balloon_mapping->a_ops->migratepage */ -static int virtballoon_migratepage(struct address_space *mapping, +static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, struct page *newpage, struct page *page, enum migrate_mode mode) { - struct balloon_dev_info *vb_dev_info = balloon_page_device(page); - struct virtio_balloon *vb; + struct virtio_balloon *vb = container_of(vb_dev_info, + struct virtio_balloon, vb_dev_info); unsigned long flags; - BUG_ON(!vb_dev_info); - - vb = vb_dev_info->balloon_device; - /* * In order to avoid lock contention while migrating pages concurrently * to leak_balloon() or fill_balloon() we just give up the balloon_lock @@ -399,7 +394,7 @@ static int virtballoon_migratepage(struct address_space *mapping, /* balloon's page migration 1st step -- inflate "newpage" */ spin_lock_irqsave(&vb_dev_info->pages_lock, flags); - balloon_page_insert(newpage, mapping, &vb_dev_info->pages); + balloon_page_insert(vb_dev_info, newpage); vb_dev_info->isolated_pages--; spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; @@ -418,18 +413,11 @@ static int virtballoon_migratepage(struct address_space *mapping, return MIGRATEPAGE_SUCCESS; } - -/* define the balloon_mapping->a_ops callback to allow balloon page migration */ -static const struct address_space_operations virtio_balloon_aops = { - .migratepage = virtballoon_migratepage, -}; #endif /* CONFIG_BALLOON_COMPACTION */ static int virtballoon_probe(struct virtio_device *vdev) { struct virtio_balloon *vb; - struct address_space *vb_mapping; - struct balloon_dev_info *vb_devinfo; int err; vdev->priv = vb = kmalloc(sizeof(*vb), GFP_KERNEL); @@ -445,30 +433,14 @@ static int virtballoon_probe(struct virtio_device *vdev) vb->vdev = vdev; vb->need_stats_update = 0; - vb_devinfo = balloon_devinfo_alloc(vb); - if (IS_ERR(vb_devinfo)) { - err = PTR_ERR(vb_devinfo); - goto out_free_vb; - } - - vb_mapping = balloon_mapping_alloc(vb_devinfo, - (balloon_compaction_check()) ? - &virtio_balloon_aops : NULL); - if (IS_ERR(vb_mapping)) { - /* - * IS_ERR(vb_mapping) && PTR_ERR(vb_mapping) == -EOPNOTSUPP - * This means !CONFIG_BALLOON_COMPACTION, otherwise we get off. - */ - err = PTR_ERR(vb_mapping); - if (err != -EOPNOTSUPP) - goto out_free_vb_devinfo; - } - - vb->vb_dev_info = vb_devinfo; + balloon_devinfo_init(&vb->vb_dev_info); +#ifdef CONFIG_BALLOON_COMPACTION + vb->vb_dev_info.migratepage = virtballoon_migratepage; +#endif err = init_vqs(vb); if (err) - goto out_free_vb_mapping; + goto out_free_vb; vb->thread = kthread_run(balloon, vb, "vballoon"); if (IS_ERR(vb->thread)) { @@ -480,10 +452,6 @@ static int virtballoon_probe(struct virtio_device *vdev) out_del_vqs: vdev->config->del_vqs(vdev); -out_free_vb_mapping: - balloon_mapping_free(vb_mapping); -out_free_vb_devinfo: - balloon_devinfo_free(vb_devinfo); out_free_vb: kfree(vb); out: @@ -509,8 +477,6 @@ static void virtballoon_remove(struct virtio_device *vdev) kthread_stop(vb->thread); remove_common(vb); - balloon_mapping_free(vb->vb_dev_info->mapping); - balloon_devinfo_free(vb->vb_dev_info); kfree(vb); } diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 38aa07d5b81c..bc3d2985cc9a 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -57,21 +57,22 @@ * balloon driver as a page book-keeper for its registered balloon devices. 
*/ struct balloon_dev_info { - void *balloon_device; /* balloon device descriptor */ - struct address_space *mapping; /* balloon special page->mapping */ unsigned long isolated_pages; /* # of isolated pages for migration */ spinlock_t pages_lock; /* Protection to pages list */ struct list_head pages; /* Pages enqueued & handled to Host */ + int (*migratepage)(struct balloon_dev_info *, struct page *newpage, + struct page *page, enum migrate_mode mode); }; extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info); extern struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info); -extern struct balloon_dev_info *balloon_devinfo_alloc( - void *balloon_dev_descriptor); -static inline void balloon_devinfo_free(struct balloon_dev_info *b_dev_info) +static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) { - kfree(b_dev_info); + balloon->isolated_pages = 0; + spin_lock_init(&balloon->pages_lock); + INIT_LIST_HEAD(&balloon->pages); + balloon->migratepage = NULL; } #ifdef CONFIG_BALLOON_COMPACTION @@ -79,14 +80,6 @@ extern bool balloon_page_isolate(struct page *page); extern void balloon_page_putback(struct page *page); extern int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode); -extern struct address_space -*balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops); - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - kfree(balloon_mapping); -} /* * __is_movable_balloon_page - helper to perform @page PageBalloon tests @@ -120,27 +113,25 @@ static inline bool isolated_balloon_page(struct page *page) /* * balloon_page_insert - insert a page into the balloon's page list and make - * the page->mapping assignment accordingly. + * the page->private assignment accordingly. + * @balloon : pointer to balloon device * @page : page to be assigned as a 'balloon page' - * @mapping : allocated special 'balloon_mapping' - * @head : balloon's device page list head * * Caller must ensure the page is locked and the spin_lock protecting balloon * pages list is held before inserting a page into the balloon device. */ -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { __SetPageBalloon(page); SetPagePrivate(page); - page->mapping = mapping; - list_add(&page->lru, head); + set_page_private(page, (unsigned long)balloon); + list_add(&page->lru, &balloon->pages); } /* * balloon_page_delete - delete a page from balloon's page list and clear - * the page->mapping assignement accordingly. + * the page->private assignement accordingly. 
* @page : page to be released from balloon's page list * * Caller must ensure the page is locked and the spin_lock protecting balloon @@ -149,7 +140,7 @@ static inline void balloon_page_insert(struct page *page, static inline void balloon_page_delete(struct page *page) { __ClearPageBalloon(page); - page->mapping = NULL; + set_page_private(page, 0); if (PagePrivate(page)) { ClearPagePrivate(page); list_del(&page->lru); @@ -162,11 +153,7 @@ static inline void balloon_page_delete(struct page *page) */ static inline struct balloon_dev_info *balloon_page_device(struct page *page) { - struct address_space *mapping = page->mapping; - if (likely(mapping)) - return mapping->private_data; - - return NULL; + return (struct balloon_dev_info *)page_private(page); } static inline gfp_t balloon_mapping_gfp_mask(void) @@ -174,29 +161,12 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER_MOVABLE; } -static inline bool balloon_compaction_check(void) -{ - return true; -} - #else /* !CONFIG_BALLOON_COMPACTION */ -static inline void *balloon_mapping_alloc(void *balloon_device, - const struct address_space_operations *a_ops) +static inline void balloon_page_insert(struct balloon_dev_info *balloon, + struct page *page) { - return ERR_PTR(-EOPNOTSUPP); -} - -static inline void balloon_mapping_free(struct address_space *balloon_mapping) -{ - return; -} - -static inline void balloon_page_insert(struct page *page, - struct address_space *mapping, - struct list_head *head) -{ - list_add(&page->lru, head); + list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) @@ -240,9 +210,5 @@ static inline gfp_t balloon_mapping_gfp_mask(void) return GFP_HIGHUSER; } -static inline bool balloon_compaction_check(void) -{ - return false; -} #endif /* CONFIG_BALLOON_COMPACTION */ #endif /* _LINUX_BALLOON_COMPACTION_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 19191d39c4f3..7ea069cd3257 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -24,8 +24,7 @@ enum mapping_flags { AS_ENOSPC = __GFP_BITS_SHIFT + 1, /* ENOSPC on async write */ AS_MM_ALL_LOCKS = __GFP_BITS_SHIFT + 2, /* under mm_take_all_locks() */ AS_UNEVICTABLE = __GFP_BITS_SHIFT + 3, /* e.g., ramdisk, SHM_LOCK */ - AS_BALLOON_MAP = __GFP_BITS_SHIFT + 4, /* balloon page special map */ - AS_EXITING = __GFP_BITS_SHIFT + 5, /* final truncate in progress */ + AS_EXITING = __GFP_BITS_SHIFT + 4, /* final truncate in progress */ }; static inline void mapping_set_error(struct address_space *mapping, int error) @@ -55,21 +54,6 @@ static inline int mapping_unevictable(struct address_space *mapping) return !!mapping; } -static inline void mapping_set_balloon(struct address_space *mapping) -{ - set_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline void mapping_clear_balloon(struct address_space *mapping) -{ - clear_bit(AS_BALLOON_MAP, &mapping->flags); -} - -static inline int mapping_balloon(struct address_space *mapping) -{ - return mapping && test_bit(AS_BALLOON_MAP, &mapping->flags); -} - static inline void mapping_set_exiting(struct address_space *mapping) { set_bit(AS_EXITING, &mapping->flags); diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 52abeeb3cb9d..3afdabdbc0a4 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -10,32 +10,6 @@ #include #include -/* - * balloon_devinfo_alloc - allocates a balloon device information descriptor. 
- * @balloon_dev_descriptor: pointer to reference the balloon device which - * this struct balloon_dev_info will be servicing. - * - * Driver must call it to properly allocate and initialize an instance of - * struct balloon_dev_info which will be used to reference a balloon device - * as well as to keep track of the balloon device page list. - */ -struct balloon_dev_info *balloon_devinfo_alloc(void *balloon_dev_descriptor) -{ - struct balloon_dev_info *b_dev_info; - b_dev_info = kmalloc(sizeof(*b_dev_info), GFP_KERNEL); - if (!b_dev_info) - return ERR_PTR(-ENOMEM); - - b_dev_info->balloon_device = balloon_dev_descriptor; - b_dev_info->mapping = NULL; - b_dev_info->isolated_pages = 0; - spin_lock_init(&b_dev_info->pages_lock); - INIT_LIST_HEAD(&b_dev_info->pages); - - return b_dev_info; -} -EXPORT_SYMBOL_GPL(balloon_devinfo_alloc); - /* * balloon_page_enqueue - allocates a new page and inserts it into the balloon * page list. @@ -61,7 +35,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) */ BUG_ON(!trylock_page(page)); spin_lock_irqsave(&b_dev_info->pages_lock, flags); - balloon_page_insert(page, b_dev_info->mapping, &b_dev_info->pages); + balloon_page_insert(b_dev_info, page); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); return page; @@ -127,60 +101,10 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) EXPORT_SYMBOL_GPL(balloon_page_dequeue); #ifdef CONFIG_BALLOON_COMPACTION -/* - * balloon_mapping_alloc - allocates a special ->mapping for ballooned pages. - * @b_dev_info: holds the balloon device information descriptor. - * @a_ops: balloon_mapping address_space_operations descriptor. - * - * Driver must call it to properly allocate and initialize an instance of - * struct address_space which will be used as the special page->mapping for - * balloon device enlisted page instances. - */ -struct address_space *balloon_mapping_alloc(struct balloon_dev_info *b_dev_info, - const struct address_space_operations *a_ops) -{ - struct address_space *mapping; - - mapping = kmalloc(sizeof(*mapping), GFP_KERNEL); - if (!mapping) - return ERR_PTR(-ENOMEM); - - /* - * Give a clean 'zeroed' status to all elements of this special - * balloon page->mapping struct address_space instance. - */ - address_space_init_once(mapping); - - /* - * Set mapping->flags appropriately, to allow balloon pages - * ->mapping identification. - */ - mapping_set_balloon(mapping); - mapping_set_gfp_mask(mapping, balloon_mapping_gfp_mask()); - - /* balloon's page->mapping->a_ops callback descriptor */ - mapping->a_ops = a_ops; - - /* - * Establish a pointer reference back to the balloon device descriptor - * this particular page->mapping will be servicing. - * This is used by compaction / migration procedures to identify and - * access the balloon device pageset while isolating / migrating pages. - * - * As some balloon drivers can register multiple balloon devices - * for a single guest, this also helps compaction / migration to - * properly deal with multiple balloon pagesets, when required. 
- */ - mapping->private_data = b_dev_info; - b_dev_info->mapping = mapping; - - return mapping; -} -EXPORT_SYMBOL_GPL(balloon_mapping_alloc); static inline void __isolate_balloon_page(struct page *page) { - struct balloon_dev_info *b_dev_info = page->mapping->private_data; + struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -192,7 +116,7 @@ static inline void __isolate_balloon_page(struct page *page) static inline void __putback_balloon_page(struct page *page) { - struct balloon_dev_info *b_dev_info = page->mapping->private_data; + struct balloon_dev_info *b_dev_info = balloon_page_device(page); unsigned long flags; spin_lock_irqsave(&b_dev_info->pages_lock, flags); @@ -202,12 +126,6 @@ static inline void __putback_balloon_page(struct page *page) spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); } -static inline int __migrate_balloon_page(struct address_space *mapping, - struct page *newpage, struct page *page, enum migrate_mode mode) -{ - return page->mapping->a_ops->migratepage(mapping, newpage, page, mode); -} - /* __isolate_lru_page() counterpart for a ballooned page */ bool balloon_page_isolate(struct page *page) { @@ -274,7 +192,7 @@ void balloon_page_putback(struct page *page) int balloon_page_migrate(struct page *newpage, struct page *page, enum migrate_mode mode) { - struct address_space *mapping; + struct balloon_dev_info *balloon = balloon_page_device(page); int rc = -EAGAIN; /* @@ -290,9 +208,8 @@ int balloon_page_migrate(struct page *newpage, return rc; } - mapping = page->mapping; - if (mapping) - rc = __migrate_balloon_page(mapping, newpage, page, mode); + if (balloon && balloon->migratepage) + rc = balloon->migratepage(balloon, newpage, page, mode); unlock_page(newpage); return rc; -- cgit v1.2.3 From 09316c09dde33aae14f34489d9e3d243ec0d5938 Mon Sep 17 00:00:00 2001 From: Konstantin Khlebnikov Date: Thu, 9 Oct 2014 15:29:32 -0700 Subject: mm/balloon_compaction: add vmstat counters and kpageflags bit Always mark pages with PageBalloon even if balloon compaction is disabled, and expose this mark in /proc/kpageflags as KPF_BALLOON. This patch also adds three counters to /proc/vmstat: "balloon_inflate", "balloon_deflate" and "balloon_migrate". They accumulate balloon activity. The current size of the balloon is (balloon_inflate - balloon_deflate) pages. All generic balloon code is now gathered under the CONFIG_MEMORY_BALLOON option. It should be selected by a ballooning driver which wants to use this feature. Currently virtio-balloon is the only user. Signed-off-by: Konstantin Khlebnikov Cc: Rafael Aquini Cc: Andrey Ryabinin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/virtio/Kconfig | 1 + drivers/virtio/virtio_balloon.c | 1 + fs/proc/page.c | 3 +++ include/linux/balloon_compaction.h | 2 ++ include/linux/vm_event_item.h | 7 +++++++ include/uapi/linux/kernel-page-flags.h | 1 + mm/Kconfig | 7 ++++++- mm/Makefile | 3 ++- mm/balloon_compaction.c | 2 ++ mm/vmstat.c | 12 +++++++++++- tools/vm/page-types.c | 1 + 11 files changed, 37 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index c6683f2e396c..00b228638274 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -25,6 +25,7 @@ config VIRTIO_PCI config VIRTIO_BALLOON tristate "Virtio balloon driver" depends on VIRTIO + select MEMORY_BALLOON ---help--- This driver supports increasing and decreasing the amount of memory within a KVM guest.
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c index 2bad7f9dd2ac..f893148a107b 100644 --- a/drivers/virtio/virtio_balloon.c +++ b/drivers/virtio/virtio_balloon.c @@ -396,6 +396,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info, spin_lock_irqsave(&vb_dev_info->pages_lock, flags); balloon_page_insert(vb_dev_info, newpage); vb_dev_info->isolated_pages--; + __count_vm_event(BALLOON_MIGRATE); spin_unlock_irqrestore(&vb_dev_info->pages_lock, flags); vb->num_pfns = VIRTIO_BALLOON_PAGES_PER_PAGE; set_page_pfns(vb->pfns, newpage); diff --git a/fs/proc/page.c b/fs/proc/page.c index e647c55275d9..1e3187da1fed 100644 --- a/fs/proc/page.c +++ b/fs/proc/page.c @@ -133,6 +133,9 @@ u64 stable_page_flags(struct page *page) if (PageBuddy(page)) u |= 1 << KPF_BUDDY; + if (PageBalloon(page)) + u |= 1 << KPF_BALLOON; + u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked); u |= kpf_copy_bit(k, KPF_SLAB, PG_slab); diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index bc3d2985cc9a..9b0a15d06a4f 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -166,11 +166,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void) static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { + __SetPageBalloon(page); list_add(&page->lru, &balloon->pages); } static inline void balloon_page_delete(struct page *page) { + __ClearPageBalloon(page); list_del(&page->lru); } diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index ced92345c963..730334cdf037 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -72,6 +72,13 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_ZERO_PAGE_ALLOC, THP_ZERO_PAGE_ALLOC_FAILED, #endif +#ifdef CONFIG_MEMORY_BALLOON + BALLOON_INFLATE, + BALLOON_DEFLATE, +#ifdef CONFIG_BALLOON_COMPACTION + BALLOON_MIGRATE, +#endif +#endif #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */ diff --git a/include/uapi/linux/kernel-page-flags.h b/include/uapi/linux/kernel-page-flags.h index 5116a0e48172..2f96d233c980 100644 --- a/include/uapi/linux/kernel-page-flags.h +++ b/include/uapi/linux/kernel-page-flags.h @@ -31,6 +31,7 @@ #define KPF_KSM 21 #define KPF_THP 22 +#define KPF_BALLOON 23 #endif /* _UAPILINUX_KERNEL_PAGE_FLAGS_H */ diff --git a/mm/Kconfig b/mm/Kconfig index 0ceb8a567dab..1d1ae6b078fd 100644 --- a/mm/Kconfig +++ b/mm/Kconfig @@ -230,12 +230,17 @@ config SPLIT_PTLOCK_CPUS config ARCH_ENABLE_SPLIT_PMD_PTLOCK boolean +# +# support for memory balloon +config MEMORY_BALLOON + boolean + # # support for memory balloon compaction config BALLOON_COMPACTION bool "Allow for balloon memory compaction/migration" def_bool y - depends on COMPACTION && VIRTIO_BALLOON + depends on COMPACTION && MEMORY_BALLOON help Memory fragmentation introduced by ballooning might reduce significantly the number of 2MB contiguous memory blocks that can be diff --git a/mm/Makefile b/mm/Makefile index f8ed7ab417b1..1f534a7f0a71 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -16,7 +16,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ readahead.o swap.o truncate.o vmscan.o shmem.o \ util.o mmzone.o vmstat.o backing-dev.o \ mm_init.o mmu_context.o percpu.o slab_common.o \ - compaction.o balloon_compaction.o vmacache.o \ + compaction.o vmacache.o \ interval_tree.o list_lru.o workingset.o \ iov_iter.o debug.o $(mmu-y) @@ -67,3 +67,4 @@ obj-$(CONFIG_ZBUD) += zbud.o 
obj-$(CONFIG_ZSMALLOC) += zsmalloc.o obj-$(CONFIG_GENERIC_EARLY_IOREMAP) += early_ioremap.o obj-$(CONFIG_CMA) += cma.o +obj-$(CONFIG_MEMORY_BALLOON) += balloon_compaction.o diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c index 3afdabdbc0a4..b3cbe19f71b5 100644 --- a/mm/balloon_compaction.c +++ b/mm/balloon_compaction.c @@ -36,6 +36,7 @@ struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info) BUG_ON(!trylock_page(page)); spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_insert(b_dev_info, page); + __count_vm_event(BALLOON_INFLATE); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); return page; @@ -74,6 +75,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info) } spin_lock_irqsave(&b_dev_info->pages_lock, flags); balloon_page_delete(page); + __count_vm_event(BALLOON_DEFLATE); spin_unlock_irqrestore(&b_dev_info->pages_lock, flags); unlock_page(page); dequeued_page = true; diff --git a/mm/vmstat.c b/mm/vmstat.c index e9ab104b956f..cce7c766da7a 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c @@ -735,7 +735,7 @@ static void walk_zones_in_node(struct seq_file *m, pg_data_t *pgdat, TEXT_FOR_HIGHMEM(xx) xx "_movable", const char * const vmstat_text[] = { - /* Zoned VM counters */ + /* enum zone_stat_item countes */ "nr_free_pages", "nr_alloc_batch", "nr_inactive_anon", @@ -778,10 +778,13 @@ const char * const vmstat_text[] = { "workingset_nodereclaim", "nr_anon_transparent_hugepages", "nr_free_cma", + + /* enum writeback_stat_item counters */ "nr_dirty_threshold", "nr_dirty_background_threshold", #ifdef CONFIG_VM_EVENT_COUNTERS + /* enum vm_event_item counters */ "pgpgin", "pgpgout", "pswpin", @@ -860,6 +863,13 @@ const char * const vmstat_text[] = { "thp_zero_page_alloc", "thp_zero_page_alloc_failed", #endif +#ifdef CONFIG_MEMORY_BALLOON + "balloon_inflate", + "balloon_deflate", +#ifdef CONFIG_BALLOON_COMPACTION + "balloon_migrate", +#endif +#endif /* CONFIG_MEMORY_BALLOON */ #ifdef CONFIG_DEBUG_TLBFLUSH #ifdef CONFIG_SMP "nr_tlb_remote_flush", diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c index c4d6d2e20e0d..264fbc297e0b 100644 --- a/tools/vm/page-types.c +++ b/tools/vm/page-types.c @@ -132,6 +132,7 @@ static const char * const page_flag_names[] = { [KPF_NOPAGE] = "n:nopage", [KPF_KSM] = "x:ksm", [KPF_THP] = "t:thp", + [KPF_BALLOON] = "o:balloon", [KPF_RESERVED] = "r:reserved", [KPF_MLOCKED] = "m:mlocked", -- cgit v1.2.3 From 722cdc17232f0f684011407f7cf3c40d39457971 Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Thu, 9 Oct 2014 15:29:50 -0700 Subject: zsmalloc: change return value unit of zs_get_total_size_bytes zs_get_total_size_bytes() returns the amount of memory zsmalloc has consumed in *bytes*, but zsmalloc itself operates in *pages* rather than bytes, so change the API; the benefit is that we avoid the unnecessary overhead of converting pages to bytes inside zsmalloc. Since the return type is pages, "zs_get_total_pages" is a better name than "zs_get_total_size_bytes".
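For illustration (not part of the patch): a caller that still needs a byte count after this change does the pages-to-bytes shift itself, as the zram and zpool hunks below do. A minimal sketch, with a hypothetical helper name:

#include <linux/zsmalloc.h>

static inline u64 zs_total_bytes(struct zs_pool *pool)
{
	/* zs_get_total_pages() now reports pages; convert at the call site */
	return (u64)zs_get_total_pages(pool) << PAGE_SHIFT;
}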
Signed-off-by: Minchan Kim Reviewed-by: Dan Streetman Cc: Sergey Senozhatsky Cc: Jerome Marchand Cc: Cc: Cc: Luigi Semenzato Cc: Nitin Gupta Cc: Seth Jennings Cc: David Horner Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/block/zram/zram_drv.c | 4 ++-- include/linux/zsmalloc.h | 2 +- mm/zsmalloc.c | 9 ++++----- 3 files changed, 7 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d00831c3d731..f0b8b30a7128 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -103,10 +103,10 @@ static ssize_t mem_used_total_show(struct device *dev, down_read(&zram->init_lock); if (init_done(zram)) - val = zs_get_total_size_bytes(meta->mem_pool); + val = zs_get_total_pages(meta->mem_pool); up_read(&zram->init_lock); - return scnprintf(buf, PAGE_SIZE, "%llu\n", val); + return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); } static ssize_t max_comp_streams_show(struct device *dev, diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index e44d634e7fb7..05c214760977 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -46,6 +46,6 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle, enum zs_mapmode mm); void zs_unmap_object(struct zs_pool *pool, unsigned long handle); -u64 zs_get_total_size_bytes(struct zs_pool *pool); +unsigned long zs_get_total_pages(struct zs_pool *pool); #endif diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c index 2a4acf400846..c4a91578dc96 100644 --- a/mm/zsmalloc.c +++ b/mm/zsmalloc.c @@ -297,7 +297,7 @@ static void zs_zpool_unmap(void *pool, unsigned long handle) static u64 zs_zpool_total_size(void *pool) { - return zs_get_total_size_bytes(pool); + return zs_get_total_pages(pool) << PAGE_SHIFT; } static struct zpool_driver zs_zpool_driver = { @@ -1181,12 +1181,11 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle) } EXPORT_SYMBOL_GPL(zs_unmap_object); -u64 zs_get_total_size_bytes(struct zs_pool *pool) +unsigned long zs_get_total_pages(struct zs_pool *pool) { - u64 npages = atomic_long_read(&pool->pages_allocated); - return npages << PAGE_SHIFT; + return atomic_long_read(&pool->pages_allocated); } -EXPORT_SYMBOL_GPL(zs_get_total_size_bytes); +EXPORT_SYMBOL_GPL(zs_get_total_pages); module_init(zs_init); module_exit(zs_exit); -- cgit v1.2.3 From 2e1d06e1c05af9dbe8a3bfddeefbf041ca637fff Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Thu, 9 Oct 2014 15:30:13 -0700 Subject: include/linux/kernel.h: rewrite min3, max3 and clamp using min and max It appears that gcc is better at optimising a double call to min and max rather than open coded min3 and max3. This can be observed here: $ cat min-max.c #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #define min3(x, y, z) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ typeof(z) _min3 = (z); \ (void) (&_min1 == &_min2); \ (void) (&_min1 == &_min3); \ _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ (_min2 < _min3 ? 
_min2 : _min3); }) int fmin3(int x, int y, int z) { return min3(x, y, z); } int fmin2(int x, int y, int z) { return min(min(x, y), z); } $ gcc -O2 -o min-max.s -S min-max.c; cat min-max.s .file "min-max.c" .text .p2align 4,,15 .globl fmin3 .type fmin3, @function fmin3: .LFB0: .cfi_startproc cmpl %esi, %edi jl .L5 cmpl %esi, %edx movl %esi, %eax cmovle %edx, %eax ret .p2align 4,,10 .p2align 3 .L5: cmpl %edi, %edx movl %edi, %eax cmovle %edx, %eax ret .cfi_endproc .LFE0: .size fmin3, .-fmin3 .p2align 4,,15 .globl fmin2 .type fmin2, @function fmin2: .LFB1: .cfi_startproc cmpl %edi, %esi movl %edx, %eax cmovle %esi, %edi cmpl %edx, %edi cmovle %edi, %eax ret .cfi_endproc .LFE1: .size fmin2, .-fmin2 .ident "GCC: (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3" .section .note.GNU-stack,"",@progbits fmin3 function, which uses open-coded min3 macro, is compiled into total of ten instructions including a conditional branch, whereas fmin2 function, which uses two calls to min2 macro, is compiled into six instructions with no branches. Similarly, open-coded clamp produces the same code as clamp using min and max macros, but the latter is much shorter: $ cat clamp.c #define clamp(val, min, max) ({ \ typeof(val) __val = (val); \ typeof(min) __min = (min); \ typeof(max) __max = (max); \ (void) (&__val == &__min); \ (void) (&__val == &__max); \ __val = __val < __min ? __min: __val; \ __val > __max ? __max: __val; }) #define min(x, y) ({ \ typeof(x) _min1 = (x); \ typeof(y) _min2 = (y); \ (void) (&_min1 == &_min2); \ _min1 < _min2 ? _min1 : _min2; }) #define max(x, y) ({ \ typeof(x) _max1 = (x); \ typeof(y) _max2 = (y); \ (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) int fclamp(int v, int min, int max) { return clamp(v, min, max); } int fclampmm(int v, int min, int max) { return min(max(v, min), max); } $ gcc -O2 -o clamp.s -S clamp.c; cat clamp.s .file "clamp.c" .text .p2align 4,,15 .globl fclamp .type fclamp, @function fclamp: .LFB0: .cfi_startproc cmpl %edi, %esi movl %edx, %eax cmovge %esi, %edi cmpl %edx, %edi cmovle %edi, %eax ret .cfi_endproc .LFE0: .size fclamp, .-fclamp .p2align 4,,15 .globl fclampmm .type fclampmm, @function fclampmm: .LFB1: .cfi_startproc cmpl %edi, %esi cmovge %esi, %edi cmpl %edi, %edx movl %edi, %eax cmovle %edx, %eax ret .cfi_endproc .LFE1: .size fclampmm, .-fclampmm .ident "GCC: (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3" .section .note.GNU-stack,"",@progbits Linux mpn-glaptop 3.13.0-29-generic #53~precise1-Ubuntu SMP Wed Jun 4 22:06:25 UTC 2014 x86_64 x86_64 x86_64 GNU/Linux gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3 Copyright (C) 2011 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -rwx------ 1 mpn eng 51224656 Jun 17 14:15 vmlinux.before -rwx------ 1 mpn eng 51224608 Jun 17 13:57 vmlinux.after 48 bytes reduction. 
The do_fault_around was a few instruction shorter and as far as I can tell saved 12 bytes on the stack, i.e.: $ grep -e rsp -e pop -e push do_fault_around.* do_fault_around.before.s:push %rbp do_fault_around.before.s:mov %rsp,%rbp do_fault_around.before.s:push %r13 do_fault_around.before.s:push %r12 do_fault_around.before.s:push %rbx do_fault_around.before.s:sub $0x38,%rsp do_fault_around.before.s:add $0x38,%rsp do_fault_around.before.s:pop %rbx do_fault_around.before.s:pop %r12 do_fault_around.before.s:pop %r13 do_fault_around.before.s:pop %rbp do_fault_around.after.s:push %rbp do_fault_around.after.s:mov %rsp,%rbp do_fault_around.after.s:push %r12 do_fault_around.after.s:push %rbx do_fault_around.after.s:sub $0x30,%rsp do_fault_around.after.s:add $0x30,%rsp do_fault_around.after.s:pop %rbx do_fault_around.after.s:pop %r12 do_fault_around.after.s:pop %rbp or here side-by-side: Before After push %rbp push %rbp mov %rsp,%rbp mov %rsp,%rbp push %r13 push %r12 push %r12 push %rbx push %rbx sub $0x38,%rsp sub $0x30,%rsp add $0x38,%rsp add $0x30,%rsp pop %rbx pop %rbx pop %r12 pop %r12 pop %r13 pop %rbp pop %rbp There are also fewer branches: $ grep ^j do_fault_around.* do_fault_around.before.s:jae ffffffff812079b7 do_fault_around.before.s:jmp ffffffff812079c5 do_fault_around.before.s:jmp ffffffff81207a14 do_fault_around.before.s:ja ffffffff812079f9 do_fault_around.before.s:jb ffffffff81207a10 do_fault_around.before.s:jmp ffffffff81207a63 do_fault_around.before.s:jne ffffffff812079df do_fault_around.after.s:jmp ffffffff812079fd do_fault_around.after.s:ja ffffffff812079e2 do_fault_around.after.s:jb ffffffff812079f9 do_fault_around.after.s:jmp ffffffff81207a4c do_fault_around.after.s:jne ffffffff812079c8 And here's with allyesconfig on a different machine: $ uname -a; gcc --version; ls -l vmlinux.* Linux erwin 3.14.7-mn #54 SMP Sun Jun 15 11:25:08 CEST 2014 x86_64 AMD Phenom(tm) II X3 710 Processor AuthenticAMD GNU/Linux gcc (GCC) 4.8.3 Copyright (C) 2013 Free Software Foundation, Inc. This is free software; see the source for copying conditions. There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -rwx------ 1 mpn eng 437027411 Jun 20 16:04 vmlinux.before -rwx------ 1 mpn eng 437026881 Jun 20 15:30 vmlinux.after 530 bytes reduction. Signed-off-by: Michal Nazarewicz Signed-off-by: Hagen Paul Pfeifer Acked-by: Steven Rostedt Cc: Hagen Paul Pfeifer Cc: David Rientjes Cc: "Rustad, Mark D" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 32 +++++--------------------------- 1 file changed, 5 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 95624bed87ef..aa2a0cb57f50 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -715,23 +715,8 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } (void) (&_max1 == &_max2); \ _max1 > _max2 ? _max1 : _max2; }) -#define min3(x, y, z) ({ \ - typeof(x) _min1 = (x); \ - typeof(y) _min2 = (y); \ - typeof(z) _min3 = (z); \ - (void) (&_min1 == &_min2); \ - (void) (&_min1 == &_min3); \ - _min1 < _min2 ? (_min1 < _min3 ? _min1 : _min3) : \ - (_min2 < _min3 ? _min2 : _min3); }) - -#define max3(x, y, z) ({ \ - typeof(x) _max1 = (x); \ - typeof(y) _max2 = (y); \ - typeof(z) _max3 = (z); \ - (void) (&_max1 == &_max2); \ - (void) (&_max1 == &_max3); \ - _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \ - (_max2 > _max3 ? 
_max2 : _max3); }) +#define min3(x, y, z) min((typeof(x))min(x, y), z) +#define max3(x, y, z) max((typeof(x))max(x, y), z) /** * min_not_zero - return the minimum that is _not_ zero, unless both are zero @@ -746,20 +731,13 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } /** * clamp - return a value clamped to a given range with strict typechecking * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: lowest allowable value + * @hi: highest allowable value * * This macro does strict typechecking of min/max to make sure they are of the * same type as val. See the unnecessary pointer comparisons. */ -#define clamp(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(min) __min = (min); \ - typeof(max) __max = (max); \ - (void) (&__val == &__min); \ - (void) (&__val == &__max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) /* * ..and if you can't take the strict -- cgit v1.2.3 From c185b07fc9f24d52a864376ed22a6d84384b0c53 Mon Sep 17 00:00:00 2001 From: Michal Nazarewicz Date: Thu, 9 Oct 2014 15:30:15 -0700 Subject: include/linux/kernel.h: deduplicate code implementing clamp* macros Instead of open-coding clamp_t macro min_t and max_t the way clamp macro does and instead of open-coding clamp_val simply use clamp_t. Furthermore, normalise argument naming in the macros to be lo and hi. Signed-off-by: Michal Nazarewicz Cc: Mark Rustad Cc: "Kirsher, Jeffrey T" Cc: Hagen Paul Pfeifer Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/kernel.h | 24 +++++++----------------- 1 file changed, 7 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/include/linux/kernel.h b/include/linux/kernel.h index aa2a0cb57f50..e9e420b6d931 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -734,7 +734,7 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * @lo: lowest allowable value * @hi: highest allowable value * - * This macro does strict typechecking of min/max to make sure they are of the + * This macro does strict typechecking of lo/hi to make sure they are of the * same type as val. See the unnecessary pointer comparisons. */ #define clamp(val, lo, hi) min((typeof(val))max(val, lo), hi) @@ -759,36 +759,26 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } * clamp_t - return a value clamped to a given range using a given type * @type: the type of variable to use * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of type * 'type' to make all the comparisons. */ -#define clamp_t(type, val, min, max) ({ \ - type __val = (val); \ - type __min = (min); \ - type __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_t(type, val, lo, hi) min_t(type, max_t(type, val, lo), hi) /** * clamp_val - return a value clamped to a given range using val's type * @val: current value - * @min: minimum allowable value - * @max: maximum allowable value + * @lo: minimum allowable value + * @hi: maximum allowable value * * This macro does no typechecking and uses temporary variables of whatever * type the input argument 'val' is. 
This is useful when val is an unsigned * type and min and max are literals that will otherwise be assigned a signed * integer type. */ -#define clamp_val(val, min, max) ({ \ - typeof(val) __val = (val); \ - typeof(val) __min = (min); \ - typeof(val) __max = (max); \ - __val = __val < __min ? __min: __val; \ - __val > __max ? __max: __val; }) +#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi) /* -- cgit v1.2.3 From 61a04e5b306ab9d6a30f78e86f1f140d7c888304 Mon Sep 17 00:00:00 2001 From: Michele Curti Date: Thu, 9 Oct 2014 15:30:17 -0700 Subject: include/linux/blkdev.h: use NULL instead of zero Quite useless but it shuts up some warnings. Signed-off-by: Michele Curti Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/blkdev.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 518b46555b80..87be398166d3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1564,7 +1564,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q, } static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) { - return 0; + return NULL; } static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) { -- cgit v1.2.3 From 578b25dfce2990d8bab5631f33a4283bd5b01556 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:28 -0700 Subject: include/linux/screen_info.h: remove unused ORIG_* macros The ORIG_* macros definitions to access struct screen_info members and all of their users were removed 7 years ago by commit 3ea335100014785f ("Remove magic macros for screen_info structure members"), but (only) the definitions reappeared a few days later in commit ee8e7cfe9d330d6f ("Make asm-x86/bootparam.h includable from userspace."). Remove them for good. Amen. Signed-off-by: Geert Uytterhoeven Cc: "H. Peter Anvin" Cc: Rusty Russell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/screen_info.h | 8 -------- 1 file changed, 8 deletions(-) (limited to 'include') diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h index 005bf3e38db5..f0f8bad54be9 100644 --- a/include/linux/screen_info.h +++ b/include/linux/screen_info.h @@ -5,12 +5,4 @@ extern struct screen_info screen_info; -#define ORIG_X (screen_info.orig_x) -#define ORIG_Y (screen_info.orig_y) -#define ORIG_VIDEO_MODE (screen_info.orig_video_mode) -#define ORIG_VIDEO_COLS (screen_info.orig_video_cols) -#define ORIG_VIDEO_EGA_BX (screen_info.orig_video_ega_bx) -#define ORIG_VIDEO_LINES (screen_info.orig_video_lines) -#define ORIG_VIDEO_ISVGA (screen_info.orig_video_isVGA) -#define ORIG_VIDEO_POINTS (screen_info.orig_video_points) #endif /* _SCREEN_INFO_H */ -- cgit v1.2.3 From 7f8998c7aef3ac9c5f3f2943e083dfa6302e90d0 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 9 Oct 2014 15:30:30 -0700 Subject: nosave: consolidate __nosave_{begin,end} in The different architectures used their own (and different) declarations: extern __visible const void __nosave_begin, __nosave_end; extern const void __nosave_begin, __nosave_end; extern long __nosave_begin, __nosave_end; Consolidate them using the first variant in . Signed-off-by: Geert Uytterhoeven Cc: Russell King Cc: Ralf Baechle Cc: Benjamin Herrenschmidt Cc: Martin Schwidefsky Cc: "David S. Miller" Cc: Guan Xuetao Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm/kernel/hibernate.c | 3 +-- arch/mips/include/asm/suspend.h | 7 ------- arch/mips/power/cpu.c | 2 +- arch/powerpc/kernel/suspend.c | 4 +--- arch/s390/kernel/suspend.c | 6 +----- arch/sh/include/asm/sections.h | 1 - arch/sparc/power/hibernate.c | 4 +--- arch/unicore32/include/mach/pm.h | 3 --- arch/unicore32/kernel/hibernate.c | 1 + arch/x86/power/hibernate_32.c | 4 +--- arch/x86/power/hibernate_64.c | 4 +--- include/asm-generic/sections.h | 4 ++++ 12 files changed, 12 insertions(+), 31 deletions(-) delete mode 100644 arch/mips/include/asm/suspend.h (limited to 'include') diff --git a/arch/arm/kernel/hibernate.c b/arch/arm/kernel/hibernate.c index bb8b79648643..c4cc50e58c13 100644 --- a/arch/arm/kernel/hibernate.c +++ b/arch/arm/kernel/hibernate.c @@ -21,8 +21,7 @@ #include #include #include - -extern const void __nosave_begin, __nosave_end; +#include int pfn_is_nosave(unsigned long pfn) { diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h deleted file mode 100644 index 3adac3b53d19..000000000000 --- a/arch/mips/include/asm/suspend.h +++ /dev/null @@ -1,7 +0,0 @@ -#ifndef __ASM_SUSPEND_H -#define __ASM_SUSPEND_H - -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - -#endif /* __ASM_SUSPEND_H */ diff --git a/arch/mips/power/cpu.c b/arch/mips/power/cpu.c index 521e5963df05..2129e67723ff 100644 --- a/arch/mips/power/cpu.c +++ b/arch/mips/power/cpu.c @@ -7,7 +7,7 @@ * Author: Hu Hongbing * Wu Zhangjin */ -#include +#include #include #include diff --git a/arch/powerpc/kernel/suspend.c b/arch/powerpc/kernel/suspend.c index 0167d53da30c..a531154cc0f3 100644 --- a/arch/powerpc/kernel/suspend.c +++ b/arch/powerpc/kernel/suspend.c @@ -9,9 +9,7 @@ #include #include - -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; +#include /* * pfn_is_nosave - check if given pfn is in the 'nosave' section diff --git a/arch/s390/kernel/suspend.c b/arch/s390/kernel/suspend.c index a7a7537ce1e7..1c4c5accd220 100644 --- a/arch/s390/kernel/suspend.c +++ b/arch/s390/kernel/suspend.c @@ -13,13 +13,9 @@ #include #include #include +#include #include "entry.h" -/* - * References to section boundaries - */ -extern const void __nosave_begin, __nosave_end; - /* * The restore of the saved pages in an hibernation image will set * the change and referenced bits in the storage key for each page. 
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index 1b6199740e98..7a99e6af6372 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h @@ -3,7 +3,6 @@ #include -extern long __nosave_begin, __nosave_end; extern long __machvec_start, __machvec_end; extern char __uncached_start, __uncached_end; extern char __start_eh_frame[], __stop_eh_frame[]; diff --git a/arch/sparc/power/hibernate.c b/arch/sparc/power/hibernate.c index 42b0b8ce699a..17bd2e167e07 100644 --- a/arch/sparc/power/hibernate.c +++ b/arch/sparc/power/hibernate.c @@ -9,11 +9,9 @@ #include #include #include +#include #include -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - struct saved_context saved_context; /* diff --git a/arch/unicore32/include/mach/pm.h b/arch/unicore32/include/mach/pm.h index 4dcd34ae194c..77b522694e74 100644 --- a/arch/unicore32/include/mach/pm.h +++ b/arch/unicore32/include/mach/pm.h @@ -36,8 +36,5 @@ extern int puv3_pm_enter(suspend_state_t state); /* Defined in hibernate_asm.S */ extern int restore_image(pgd_t *resume_pg_dir, struct pbe *restore_pblist); -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - extern struct pbe *restore_pblist; #endif diff --git a/arch/unicore32/kernel/hibernate.c b/arch/unicore32/kernel/hibernate.c index d75ef8b6cb56..9969ec374abb 100644 --- a/arch/unicore32/kernel/hibernate.c +++ b/arch/unicore32/kernel/hibernate.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "mach/pm.h" diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c index 7d28c885d238..291226b952a9 100644 --- a/arch/x86/power/hibernate_32.c +++ b/arch/x86/power/hibernate_32.c @@ -13,13 +13,11 @@ #include #include #include +#include /* Defined in hibernate_asm_32.S */ extern int restore_image(void); -/* References to section boundaries */ -extern const void __nosave_begin, __nosave_end; - /* Pointer to the temporary resume page tables */ pgd_t *resume_pg_dir; diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 35e2bb6c0f37..009947d419a6 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -17,11 +17,9 @@ #include #include #include +#include #include -/* References to section boundaries */ -extern __visible const void __nosave_begin, __nosave_end; - /* Defined in hibernate_asm_64.S */ extern asmlinkage __visible int restore_image(void); diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h index f1a24b5c3b90..b58fd667f87b 100644 --- a/include/asm-generic/sections.h +++ b/include/asm-generic/sections.h @@ -3,6 +3,8 @@ /* References to section boundaries */ +#include + /* * Usage guidelines: * _text, _data: architecture specific, don't use them in arch-independent code @@ -37,6 +39,8 @@ extern char __start_rodata[], __end_rodata[]; /* Start and end of .ctors section - used for constructor calls. */ extern char __ctors_start[], __ctors_end[]; +extern __visible const void __nosave_begin, __nosave_end; + /* function descriptor handling (if any). Override * in asm/sections.h */ #ifndef dereference_function_descriptor -- cgit v1.2.3
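The main consumers of these shared declarations are the architectures' pfn_is_nosave() helpers. A simplified sketch (assumed form, not taken verbatim from any single architecture) of such a helper once it relies on <asm/sections.h>:

#include <linux/mm.h>
#include <asm/page.h>
#include <asm/sections.h>

int pfn_is_nosave(unsigned long pfn)
{
	unsigned long begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT;
	unsigned long end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) >> PAGE_SHIFT;

	/* pages in [__nosave_begin, __nosave_end) are skipped by hibernation */
	return pfn >= begin_pfn && pfn < end_pfn;
}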