| author | Bob Peterson <rpeterso@redhat.com> | 2018-01-19 00:17:13 +0300 |
|---|---|---|
| committer | Bob Peterson <rpeterso@redhat.com> | 2018-01-19 00:17:13 +0300 |
| commit | 786ebd9f68cdf512f389e5f2d0015f1beb0777d8 (patch) | |
| tree | b4bae82884c2232f891aec11e58b38eb4fec5e5d /mm/page_alloc.c | |
| parent | 1f23bc7869fffec40b8bd9333a74a18d1de54d98 (diff) | |
| parent | 4e56a6411fbce6f859566e17298114c2434391a4 (diff) | |
| download | linux-786ebd9f68cdf512f389e5f2d0015f1beb0777d8.tar.xz | |
Merge branch 'punch-hole' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2.git
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 24 |
1 file changed, 19 insertions, 5 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d4096f4a5c1f..7e5e775e97f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2507,10 +2507,6 @@ void drain_all_pages(struct zone *zone)
 	if (WARN_ON_ONCE(!mm_percpu_wq))
 		return;
 
-	/* Workqueues cannot recurse */
-	if (current->flags & PF_WQ_WORKER)
-		return;
-
 	/*
 	 * Do not drain if one is already in progress unless it's specific to
 	 * a zone. Such callers are primarily CMA and memory hotplug and need
@@ -2688,6 +2684,7 @@ void free_unref_page_list(struct list_head *list)
 {
 	struct page *page, *next;
 	unsigned long flags, pfn;
+	int batch_count = 0;
 
 	/* Prepare pages for freeing */
 	list_for_each_entry_safe(page, next, list, lru) {
@@ -2704,6 +2701,16 @@ void free_unref_page_list(struct list_head *list)
 		set_page_private(page, 0);
 		trace_mm_page_free_batched(page);
 		free_unref_page_commit(page, pfn);
+
+		/*
+		 * Guard against excessive IRQ disabled times when we get
+		 * a large list of pages to free.
+		 */
+		if (++batch_count == SWAP_CLUSTER_MAX) {
+			local_irq_restore(flags);
+			batch_count = 0;
+			local_irq_save(flags);
+		}
 	}
 	local_irq_restore(flags);
 }
@@ -7656,11 +7663,18 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 
 	/*
 	 * In case of -EBUSY, we'd like to know which page causes problem.
-	 * So, just fall through. We will check it in test_pages_isolated().
+	 * So, just fall through. test_pages_isolated() has a tracepoint
+	 * which will report the busy page.
+	 *
+	 * It is possible that busy pages could become available before
+	 * the call to test_pages_isolated, and the range will actually be
+	 * allocated. So, if we fall through be sure to clear ret so that
+	 * -EBUSY is not accidentally used or returned to caller.
 	 */
 	ret = __alloc_contig_migrate_range(&cc, start, end);
 	if (ret && ret != -EBUSY)
 		goto done;
+	ret =0;
 
 	/*
 	 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
```
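The free_unref_page_list() hunk above bounds how long interrupts stay disabled: every SWAP_CLUSTER_MAX pages it briefly re-enables and re-disables IRQs so a long free list cannot cause excessive IRQ-off latency. Below is a minimal user-space sketch of that same batching pattern, not kernel code: the mutex stands in for the IRQ-disabled section, and BATCH_MAX, process_item() and process_list() are illustrative names, not anything from the patch.

```c
/*
 * User-space analogue of the batching pattern added to
 * free_unref_page_list(): work proceeds under a lock, but the lock is
 * dropped and immediately re-taken every BATCH_MAX items so other
 * contenders (here, threads; in the kernel, interrupts) are not starved
 * while a long list is processed.
 */
#include <pthread.h>
#include <stdio.h>

#define BATCH_MAX 32            /* plays the role of SWAP_CLUSTER_MAX */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void process_item(int item)
{
        /* Stand-in for free_unref_page_commit(page, pfn). */
        (void)item;
}

static void process_list(const int *items, int n)
{
        int batch_count = 0;

        pthread_mutex_lock(&lock);          /* ~ local_irq_save()    */
        for (int i = 0; i < n; i++) {
                process_item(items[i]);

                /*
                 * Guard against holding the lock too long on a large
                 * list: briefly release it every BATCH_MAX items,
                 * mirroring the local_irq_restore()/local_irq_save()
                 * pair in the patch above.
                 */
                if (++batch_count == BATCH_MAX) {
                        pthread_mutex_unlock(&lock);
                        batch_count = 0;
                        pthread_mutex_lock(&lock);
                }
        }
        pthread_mutex_unlock(&lock);        /* ~ local_irq_restore() */
}

int main(void)
{
        int items[100] = { 0 };

        process_list(items, 100);
        printf("processed 100 items in batches of %d\n", BATCH_MAX);
        return 0;
}
```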

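The alloc_contig_range() hunk clears `ret` after __alloc_contig_migrate_range(), since -EBUSY at that point is only advisory: the busy pages may free up before the later isolation check, and a stale -EBUSY must not be returned to the caller. The sketch below is a simplified user-space model of that control flow, not the kernel function; migrate_range() and pages_isolated() are hypothetical stand-ins for __alloc_contig_migrate_range() and test_pages_isolated().

```c
/*
 * Why "ret = 0" matters: the advisory -EBUSY from the migrate step must
 * not leak to the caller when the subsequent isolation check succeeds.
 */
#include <errno.h>
#include <stdio.h>

/* Illustrative stand-ins for the two kernel steps. */
static int migrate_range(void)  { return -EBUSY; } /* busy page seen...  */
static int pages_isolated(void) { return 0; }      /* ...but freed later */

static int alloc_range(void)
{
        int ret;

        ret = migrate_range();
        if (ret && ret != -EBUSY)
                return ret;     /* hard error: give up                  */
        ret = 0;                /* drop the advisory -EBUSY (the fix)   */

        if (pages_isolated() != 0)
                ret = -EBUSY;   /* still busy: report it now            */

        return ret;             /* without the fix, a stale -EBUSY      */
                                /* would be returned despite success    */
}

int main(void)
{
        printf("alloc_range() -> %d\n", alloc_range());
        return 0;
}
```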