| author | Pavel Tatashin <pavel.tatashin@microsoft.com> | 2018-10-27 01:10:21 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-10-27 02:38:15 +0300 |
| commit | ec393a0f014eaf688a3dbe8c8a4cbb52d7f535f9 (patch) | |
| tree | 55f96f2ba77f8adec219a61800e1ad7eafcba470 /mm/page_alloc.c | |
| parent | 907ec5fca3dc38d37737de826f06f25b063aa08e (diff) | |
| download | linux-ec393a0f014eaf688a3dbe8c8a4cbb52d7f535f9.tar.xz | |
mm: return zero_resv_unavail optimization
When checking for valid pfns in zero_resv_unavail(), it is not necessary
to verify that every pfn within a pageblock_nr_pages range is valid; only
the first one needs to be checked. This is because memory for struct pages
is allocated in contiguous chunks that each contain pageblock_nr_pages
struct pages.
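
For illustration only (not part of the patch), the per-pageblock skip can be modeled in a few lines of standalone C. The 8-pfn block size, the valid_block[] table, and the *_model() helpers below are invented stand-ins for pageblock_nr_pages, the memory map, and pfn_valid()/zero_pfn_range(); the sketch only mirrors the shape of the pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)) check the patch performs.

```c
#include <stdio.h>
#include <stdbool.h>

#define BLOCK_NR_PAGES   8UL                      /* stand-in for pageblock_nr_pages */
#define ALIGN_DOWN(x, a) ((x) & ~((a) - 1))       /* 'a' must be a power of two */

/* Fake validity map: blocks 0 and 2 are backed, blocks 1 and 3 are not. */
static bool pfn_valid_model(unsigned long pfn)
{
	static const bool valid_block[] = { true, false, true, false };

	return valid_block[(pfn / BLOCK_NR_PAGES) % 4];
}

static unsigned long zero_pfn_range_model(unsigned long spfn, unsigned long epfn)
{
	unsigned long pfn, pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		/*
		 * The aligned check gives the same answer for every pfn in a
		 * block, so an invalid block costs one check: jump to its last
		 * pfn and let the loop increment move to the next block.
		 */
		if (!pfn_valid_model(ALIGN_DOWN(pfn, BLOCK_NR_PAGES))) {
			pfn = ALIGN_DOWN(pfn, BLOCK_NR_PAGES) + BLOCK_NR_PAGES - 1;
			continue;
		}
		pgcnt++;          /* the kernel would mm_zero_struct_page() here */
	}

	return pgcnt;
}

int main(void)
{
	printf("zeroed %lu of 32 pfns\n", zero_pfn_range_model(0, 32));
	return 0;
}
```

Running it reports 16 of 32 pfns zeroed: each invalid block is rejected by a single check and skipped in one jump.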
Link: http://lkml.kernel.org/r/20181002143821.5112-3-msys.mizuma@gmail.com
Signed-off-by: Pavel Tatashin <pavel.tatashin@microsoft.com>
Signed-off-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Reviewed-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
Acked-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
| -rw-r--r-- | mm/page_alloc.c | 46 |
1 file changed, 26 insertions, 20 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6d863c5afa08..863d46da6586 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6509,6 +6509,29 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 }
 
 #if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
+
+/*
+ * Zero all valid struct pages in range [spfn, epfn), return number of struct
+ * pages zeroed
+ */
+static u64 zero_pfn_range(unsigned long spfn, unsigned long epfn)
+{
+	unsigned long pfn;
+	u64 pgcnt = 0;
+
+	for (pfn = spfn; pfn < epfn; pfn++) {
+		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
+			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
+				+ pageblock_nr_pages - 1;
+			continue;
+		}
+		mm_zero_struct_page(pfn_to_page(pfn));
+		pgcnt++;
+	}
+
+	return pgcnt;
+}
+
 /*
  * Only struct pages that are backed by physical memory are zeroed and
  * initialized by going through __init_single_page(). But, there are some
@@ -6524,7 +6547,6 @@ void __init free_area_init_node(int nid, unsigned long *zones_size,
 void __init zero_resv_unavail(void)
 {
 	phys_addr_t start, end;
-	unsigned long pfn;
 	u64 i, pgcnt;
 	phys_addr_t next = 0;
 
@@ -6534,34 +6556,18 @@ void __init zero_resv_unavail(void)
 	pgcnt = 0;
 	for_each_mem_range(i, &memblock.memory, NULL,
 			NUMA_NO_NODE, MEMBLOCK_NONE, &start, &end, NULL) {
-		if (next < start) {
-			for (pfn = PFN_DOWN(next); pfn < PFN_UP(start); pfn++) {
-				if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-					continue;
-				mm_zero_struct_page(pfn_to_page(pfn));
-				pgcnt++;
-			}
-		}
+		if (next < start)
+			pgcnt += zero_pfn_range(PFN_DOWN(next), PFN_UP(start));
 		next = end;
 	}
-	for (pfn = PFN_DOWN(next); pfn < max_pfn; pfn++) {
-		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages)))
-			continue;
-		mm_zero_struct_page(pfn_to_page(pfn));
-		pgcnt++;
-	}
-
+	pgcnt += zero_pfn_range(PFN_DOWN(next), max_pfn);
 
 	/*
 	 * Struct pages that do not have backing memory. This could be because
 	 * firmware is using some of this memory, or for some other reasons.
-	 * Once memblock is changed so such behaviour is not allowed: i.e.
-	 * list of "reserved" memory must be a subset of list of "memory", then
-	 * this code can be removed.
 	 */
 	if (pgcnt)
 		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
-
 }
 #endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
```
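
As a usage note, the refactored zero_resv_unavail() above simply feeds pfn ranges to zero_pfn_range(): one range per hole between memblock.memory regions, plus a tail range up to max_pfn. The standalone sketch below models that caller loop under assumed values; the 4 KiB page size, the two hard-coded regions, the trailing-hole max_pfn, and the locally re-implemented PFN_DOWN()/PFN_UP() are all assumptions for the example, not kernel code.

```c
#include <stdio.h>

#define PAGE_SHIFT  12                                   /* assume 4 KiB pages */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT)

int main(void)
{
	/* Two made-up "memblock.memory" regions with a hole below 1 MiB. */
	const unsigned long ranges[][2] = {
		{ 0x0000000UL, 0x009F000UL },
		{ 0x0100000UL, 0x8000000UL },
	};
	/* Assume max_pfn reaches a little past the last region (trailing hole). */
	unsigned long next = 0, max_pfn = PFN_DOWN(0x8080000UL);

	for (unsigned int i = 0; i < 2; i++) {
		unsigned long start = ranges[i][0], end = ranges[i][1];

		/* Mirrors: if (next < start) zero_pfn_range(PFN_DOWN(next), PFN_UP(start)); */
		if (next < start)
			printf("gap:  pfns [%lu, %lu)\n", PFN_DOWN(next), PFN_UP(start));
		next = end;
	}
	/* Mirrors: zero_pfn_range(PFN_DOWN(next), max_pfn); */
	printf("tail: pfns [%lu, %lu)\n", PFN_DOWN(next), max_pfn);
	return 0;
}
```

With these made-up regions it prints one gap, pfns [159, 256) for the hole below 1 MiB, plus the small tail range up to the assumed max_pfn.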