Diffstat (limited to 'mm')
-rw-r--r-- | mm/filemap.c    |   2
-rw-r--r-- | mm/hugetlb.c    |   8
-rw-r--r-- | mm/internal.h   |  13
-rw-r--r-- | mm/memcontrol.c |   5
-rw-r--r-- | mm/memory.c     |   9
-rw-r--r-- | mm/mempolicy.c  |   2
-rw-r--r-- | mm/page_alloc.c |   4
-rw-r--r-- | mm/slab.c       |   3
-rw-r--r-- | mm/slub.c       | 175
-rw-r--r-- | mm/swapfile.c   |   2
10 files changed, 96 insertions, 127 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index b7b1be6dbd83..5c74b68935ac 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -604,7 +604,7 @@ void __lock_page(struct page *page)
 }
 EXPORT_SYMBOL(__lock_page);
 
-int fastcall __lock_page_killable(struct page *page)
+int __lock_page_killable(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index d9a380312467..89e6286a7f57 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -24,14 +24,15 @@
 const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
 static unsigned long nr_huge_pages, free_huge_pages, resv_huge_pages;
 static unsigned long surplus_huge_pages;
+static unsigned long nr_overcommit_huge_pages;
 unsigned long max_huge_pages;
+unsigned long sysctl_overcommit_huge_pages;
 static struct list_head hugepage_freelists[MAX_NUMNODES];
 static unsigned int nr_huge_pages_node[MAX_NUMNODES];
 static unsigned int free_huge_pages_node[MAX_NUMNODES];
 static unsigned int surplus_huge_pages_node[MAX_NUMNODES];
 static gfp_t htlb_alloc_mask = GFP_HIGHUSER;
 unsigned long hugepages_treat_as_movable;
-unsigned long nr_overcommit_huge_pages;
 static int hugetlb_next_nid;
 
 /*
@@ -119,6 +120,7 @@ static void free_huge_page(struct page *page)
 	struct address_space *mapping;
 
 	mapping = (struct address_space *) page_private(page);
+	set_page_private(page, 0);
 	BUG_ON(page_count(page));
 	INIT_LIST_HEAD(&page->lru);
 
@@ -133,7 +135,6 @@ static void free_huge_page(struct page *page)
 	spin_unlock(&hugetlb_lock);
 	if (mapping)
 		hugetlb_put_quota(mapping, 1);
-	set_page_private(page, 0);
 }
 
 /*
@@ -609,8 +610,9 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
 			struct file *file, void __user *buffer,
 			size_t *length, loff_t *ppos)
 {
-	spin_lock(&hugetlb_lock);
 	proc_doulongvec_minmax(table, write, file, buffer, length, ppos);
+	spin_lock(&hugetlb_lock);
+	nr_overcommit_huge_pages = sysctl_overcommit_huge_pages;
 	spin_unlock(&hugetlb_lock);
 	return 0;
 }
diff --git a/mm/internal.h b/mm/internal.h
index 5a9a6200e034..789727309f4d 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -47,4 +47,17 @@ static inline unsigned long page_order(struct page *page)
 	VM_BUG_ON(!PageBuddy(page));
 	return page_private(page);
 }
+
+/*
+ * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
+ * so all functions starting at paging_init should be marked __init
+ * in those cases. SPARSEMEM, however, allows for memory hotplug,
+ * and alloc_bootmem_node is not used.
+ */
+#ifdef CONFIG_SPARSEMEM
+#define __paginginit __meminit
+#else
+#define __paginginit __init
+#endif
+
 #endif
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6bded84c20c8..631002d085d1 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -534,7 +534,6 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 		if (scan >= nr_to_scan)
 			break;
 		page = pc->page;
-		VM_BUG_ON(!pc);
 
 		if (unlikely(!PageLRU(page)))
 			continue;
@@ -1101,7 +1100,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
 		mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
 
 	if (mem == NULL)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	res_counter_init(&mem->res);
 
@@ -1117,7 +1116,7 @@ free_out:
 		free_mem_cgroup_per_zone_info(mem, node);
 	if (cont->parent != NULL)
 		kfree(mem);
-	return NULL;
+	return ERR_PTR(-ENOMEM);
 }
 
 static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
diff --git a/mm/memory.c b/mm/memory.c
index 717aa0e3be2d..ce3c9e4492d8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2711,6 +2711,13 @@ void print_vma_addr(char *prefix, unsigned long ip)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 
+	/*
+	 * Do not print if we are in atomic
+	 * contexts (in exception stacks, etc.):
+	 */
+	if (preempt_count())
+		return;
+
 	down_read(&mm->mmap_sem);
 	vma = find_vma(mm, ip);
 	if (vma && vma->vm_file) {
@@ -2719,7 +2726,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
 		if (buf) {
 			char *p, *s;
 
-			p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
+			p = d_path(&f->f_path, buf, PAGE_SIZE);
 			if (IS_ERR(p))
 				p = "?";
 			s = strrchr(p, '/');
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8d246c3b340f..6c7ba1a63d23 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1996,7 +1996,7 @@ int show_numa_map(struct seq_file *m, void *v)
 
 	if (file) {
 		seq_printf(m, " file=");
-		seq_path(m, file->f_path.mnt, file->f_path.dentry, "\n\t= ");
+		seq_path(m, &file->f_path, "\n\t= ");
 	} else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
 		seq_printf(m, " heap");
 	} else if (vma->vm_start <= mm->start_stack &&
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 75b979313346..8896e874a67d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3314,7 +3314,7 @@ static inline int pageblock_default_order(unsigned int order)
  *   - mark all memory queues empty
  *   - clear the memory bitmaps
  */
-static void __meminit free_area_init_core(struct pglist_data *pgdat,
+static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long *zholes_size)
 {
 	enum zone_type j;
@@ -3438,7 +3438,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __meminit free_area_init_node(int nid, struct pglist_data *pgdat,
+void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
 		unsigned long *zones_size, unsigned long node_start_pfn,
 		unsigned long *zholes_size)
 {
diff --git a/mm/slab.c b/mm/slab.c
index 40c00dacbe4b..473e6c2eaefb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2630,6 +2630,7 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
 	slabp->colouroff = colour_off;
 	slabp->s_mem = objp + colour_off;
 	slabp->nodeid = nodeid;
+	slabp->free = 0;
 	return slabp;
 }
 
@@ -2683,7 +2684,6 @@ static void cache_init_objs(struct kmem_cache *cachep,
 		slab_bufctl(slabp)[i] = i + 1;
 	}
 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
-	slabp->free = 0;
 }
 
 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
@@ -2816,7 +2816,6 @@ static int cache_grow(struct kmem_cache *cachep,
 	if (!slabp)
 		goto opps1;
 
-	slabp->nodeid = nodeid;
 	slab_map_pages(cachep, slabp, objp);
 
 	cache_init_objs(cachep, slabp);
diff --git a/mm/slub.c b/mm/slub.c
index e2989ae243b5..74c65af0a54f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -149,13 +149,6 @@ static inline void ClearSlabDebug(struct page *page)
 /* Enable to test recovery from slab corruption on boot */
 #undef SLUB_RESILIENCY_TEST
 
-/*
- * Currently fastpath is not supported if preemption is enabled.
- */
-#if defined(CONFIG_FAST_CMPXCHG_LOCAL) && !defined(CONFIG_PREEMPT)
-#define SLUB_FASTPATH
-#endif
-
 #if PAGE_SHIFT <= 12
 
 /*
@@ -211,6 +204,8 @@ static inline void ClearSlabDebug(struct page *page)
 /* Internal SLUB flags */
 #define __OBJECT_POISON		0x80000000 /* Poison object */
 #define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */
+#define __KMALLOC_CACHE		0x20000000 /* objects freed using kfree */
+#define __PAGE_ALLOC_FALLBACK	0x10000000 /* Allow fallback to page alloc */
 
 /* Not all arches define cache_line_size */
 #ifndef cache_line_size
@@ -308,7 +303,7 @@ static inline int is_end(void *addr)
 	return (unsigned long)addr & PAGE_MAPPING_ANON;
 }
 
-void *slab_address(struct page *page)
+static void *slab_address(struct page *page)
 {
 	return page->end - PAGE_MAPPING_ANON;
 }
@@ -1078,14 +1073,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	struct page *page;
 	int pages = 1 << s->order;
 
-	if (s->order)
-		flags |= __GFP_COMP;
-
-	if (s->flags & SLAB_CACHE_DMA)
-		flags |= SLUB_DMA;
-
-	if (s->flags & SLAB_RECLAIM_ACCOUNT)
-		flags |= __GFP_RECLAIMABLE;
+	flags |= s->allocflags;
 
 	if (node == -1)
 		page = alloc_pages(flags, s->order);
@@ -1519,11 +1507,7 @@ static void *__slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct page *new;
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
 
-	local_irq_save(flags);
-#endif
 	if (!c->page)
 		goto new_slab;
 
@@ -1546,10 +1530,6 @@ load_freelist:
 unlock_out:
 	slab_unlock(c->page);
 	stat(c, ALLOC_SLOWPATH);
-out:
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return object;
 
 another_slab:
@@ -1581,8 +1561,22 @@ new_slab:
 		c->page = new;
 		goto load_freelist;
 	}
-	object = NULL;
-	goto out;
+
+	/*
+	 * No memory available.
+	 *
+	 * If the slab uses higher order allocs but the object is
+	 * smaller than a page size then we can fallback in emergencies
+	 * to the page allocator via kmalloc_large. The page allocator may
+	 * have failed to obtain a higher order page and we can try to
+	 * allocate a single page if the object fits into a single page.
+	 * That is only possible if certain conditions are met that are being
+	 * checked when a slab is created.
+	 */
+	if (!(gfpflags & __GFP_NORETRY) && (s->flags & __PAGE_ALLOC_FALLBACK))
+		return kmalloc_large(s->objsize, gfpflags);
+
+	return NULL;
 debug:
 	object = c->page->freelist;
 	if (!alloc_debug_processing(s, c->page, object, addr))
@@ -1609,34 +1603,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 {
 	void **object;
 	struct kmem_cache_cpu *c;
-
-/*
- * The SLUB_FASTPATH path is provisional and is currently disabled if the
- * kernel is compiled with preemption or if the arch does not support
- * fast cmpxchg operations. There are a couple of coming changes that will
- * simplify matters and allow preemption. Ultimately we may end up making
- * SLUB_FASTPATH the default.
- *
- * 1. The introduction of the per cpu allocator will avoid array lookups
- *    through get_cpu_slab(). A special register can be used instead.
- *
- * 2. The introduction of per cpu atomic operations (cpu_ops) means that
- *    we can realize the logic here entirely with per cpu atomics. The
- *    per cpu atomic ops will take care of the preemption issues.
- */
-
-#ifdef SLUB_FASTPATH
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	do {
-		object = c->freelist;
-		if (unlikely(is_end(object) || !node_match(c, node))) {
-			object = __slab_alloc(s, gfpflags, node, addr, c);
-			break;
-		}
-		stat(c, ALLOC_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, object, object[c->offset])
-								!= object);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1651,7 +1617,6 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 		stat(c, ALLOC_FASTPATH);
 	}
 	local_irq_restore(flags);
-#endif
 
 	if (unlikely((gfpflags & __GFP_ZERO) && object))
 		memset(object, 0, c->objsize);
@@ -1688,11 +1653,6 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
 
-#ifdef SLUB_FASTPATH
-	unsigned long flags;
-
-	local_irq_save(flags);
-#endif
 	c = get_cpu_slab(s, raw_smp_processor_id());
 	stat(c, FREE_SLOWPATH);
 	slab_lock(page);
@@ -1724,9 +1684,6 @@ checks_ok:
 
 out_unlock:
 	slab_unlock(page);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	return;
 
 slab_empty:
@@ -1739,9 +1696,6 @@ slab_empty:
 	}
 	slab_unlock(page);
 	stat(c, FREE_SLAB);
-#ifdef SLUB_FASTPATH
-	local_irq_restore(flags);
-#endif
 	discard_slab(s, page);
 	return;
 
@@ -1767,34 +1721,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 {
 	void **object = (void *)x;
 	struct kmem_cache_cpu *c;
-
-#ifdef SLUB_FASTPATH
-	void **freelist;
-
-	c = get_cpu_slab(s, raw_smp_processor_id());
-	debug_check_no_locks_freed(object, s->objsize);
-	do {
-		freelist = c->freelist;
-		barrier();
-		/*
-		 * If the compiler would reorder the retrieval of c->page to
-		 * come before c->freelist then an interrupt could
-		 * change the cpu slab before we retrieve c->freelist. We
-		 * could be matching on a page no longer active and put the
-		 * object onto the freelist of the wrong slab.
-		 *
-		 * On the other hand: If we already have the freelist pointer
-		 * then any change of cpu_slab will cause the cmpxchg to fail
-		 * since the freelist pointers are unique per slab.
-		 */
-		if (unlikely(page != c->page || c->node < 0)) {
-			__slab_free(s, page, x, addr, c->offset);
-			break;
-		}
-		object[c->offset] = freelist;
-		stat(c, FREE_FASTPATH);
-	} while (cmpxchg_local(&c->freelist, freelist, object) != freelist);
-#else
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -1808,7 +1734,6 @@ static __always_inline void slab_free(struct kmem_cache *s,
 		__slab_free(s, page, x, addr, c->offset);
 
 	local_irq_restore(flags);
-#endif
 }
 
 void kmem_cache_free(struct kmem_cache *s, void *x)
@@ -2329,10 +2254,33 @@ static int calculate_sizes(struct kmem_cache *s)
 	size = ALIGN(size, align);
 	s->size = size;
-	s->order = calculate_order(size);
+	if ((flags & __KMALLOC_CACHE) &&
+			PAGE_SIZE / size < slub_min_objects) {
+		/*
+		 * Kmalloc cache that would not have enough objects in
+		 * an order 0 page. Kmalloc slabs can fallback to
+		 * page allocator order 0 allocs so take a reasonably large
+		 * order that will allows us a good number of objects.
+		 */
+		s->order = max(slub_max_order, PAGE_ALLOC_COSTLY_ORDER);
+		s->flags |= __PAGE_ALLOC_FALLBACK;
+		s->allocflags |= __GFP_NOWARN;
+	} else
+		s->order = calculate_order(size);
+
 	if (s->order < 0)
 		return 0;
 
+	s->allocflags = 0;
+	if (s->order)
+		s->allocflags |= __GFP_COMP;
+
+	if (s->flags & SLAB_CACHE_DMA)
+		s->allocflags |= SLUB_DMA;
+
+	if (s->flags & SLAB_RECLAIM_ACCOUNT)
+		s->allocflags |= __GFP_RECLAIMABLE;
+
 	/*
 	 * Determine the number of objects per slab
 	 */
@@ -2484,11 +2432,11 @@ EXPORT_SYMBOL(kmem_cache_destroy);
  *		Kmalloc subsystem
  *******************************************************************/
 
-struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
 EXPORT_SYMBOL(kmalloc_caches);
 
 #ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
+static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
 #endif
 
 static int __init setup_slub_min_order(char *str)
@@ -2536,7 +2484,7 @@ static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
 
 	down_write(&slub_lock);
 	if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
-			flags, NULL))
+			flags | __KMALLOC_CACHE, NULL))
 		goto panic;
 
 	list_add(&s->list, &slab_caches);
@@ -2670,9 +2618,8 @@ void *__kmalloc(size_t size, gfp_t flags)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -2688,9 +2635,8 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(flags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, flags);
 
 	s = get_slab(size, flags);
 
@@ -3001,7 +2947,7 @@ void __init kmem_cache_init(void)
 		caches++;
 	}
 
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
 		create_kmalloc_cache(&kmalloc_caches[i],
 			"kmalloc", 1 << i, GFP_KERNEL);
 		caches++;
 	}
@@ -3028,7 +2974,7 @@ void __init kmem_cache_init(void)
 	slab_state = UP;
 
 	/* Provide the correct kmalloc names now that the caches are up */
-	for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
+	for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
 		kmalloc_caches[i]. name =
 			kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
 
@@ -3057,6 +3003,9 @@ static int slab_unmergeable(struct kmem_cache *s)
 	if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
 		return 1;
 
+	if ((s->flags & __PAGE_ALLOC_FALLBACK))
+		return 1;
+
 	if (s->ctor)
 		return 1;
 
@@ -3218,9 +3167,9 @@ void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
@@ -3234,9 +3183,9 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
 {
 	struct kmem_cache *s;
 
-	if (unlikely(size > PAGE_SIZE / 2))
-		return (void *)__get_free_pages(gfpflags | __GFP_COMP,
-						get_order(size));
+	if (unlikely(size > PAGE_SIZE))
+		return kmalloc_large(size, gfpflags);
+
 	s = get_slab(size, gfpflags);
 
 	if (unlikely(ZERO_OR_NULL_PTR(s)))
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 02ccab5ad9d9..2da149cfc9ac 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1394,7 +1394,7 @@ static int swap_show(struct seq_file *swap, void *v)
 	}
 
 	file = ptr->swap_file;
-	len = seq_path(swap, file->f_path.mnt, file->f_path.dentry, " \t\n\\");
+	len = seq_path(swap, &file->f_path, " \t\n\\");
 	seq_printf(swap, "%*s%s\t%u\t%u\t%d\n",
 			len < 40 ? 40 - len : 1, " ",
 			S_ISBLK(file->f_path.dentry->d_inode->i_mode) ?