author | Joel Stanley <joel@jms.id.au> | 2019-03-15 09:00:28 +0300
committer | Joel Stanley <joel@jms.id.au> | 2019-03-15 09:01:07 +0300
commit | 9bd1e03005e74ecfe16f58143714f25793ae1c5d (patch)
tree | 557f67e781effd12a2fdf1816b757c61be8d832e /mm/memory_hotplug.c
parent | 7859d04be772a1d185de335a8e9d38ac03a89f3f (diff)
parent | ce194fa2b267e2018f42442347d90df01c4071d6 (diff)
Merge tag 'v4.19.29' into dev-4.19
This is the 4.19.29 stable release
Signed-off-by: Joel Stanley <joel@jms.id.au>
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r-- | mm/memory_hotplug.c | 29
1 file changed, 18 insertions, 11 deletions
```diff
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c6119ad3561e..156991edec2a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1213,11 +1213,13 @@ static inline int pageblock_free(struct page *page)
 	return PageBuddy(page) && page_order(page) >= pageblock_order;
 }
 
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
+
 	/* Ensure the starting page is pageblock-aligned */
-	BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+	BUG_ON(pfn & (pageblock_nr_pages - 1));
 
 	/* If the entire pageblock is free, move to the end of free page */
 	if (pageblock_free(page)) {
@@ -1225,16 +1227,16 @@ static struct page *next_active_pageblock(struct page *page)
 		/* be careful. we don't have locks, page_order can be changed.*/
 		order = page_order(page);
 		if ((order < MAX_ORDER) && (order >= pageblock_order))
-			return page + (1 << order);
+			return pfn + (1 << order);
 	}
 
-	return page + pageblock_nr_pages;
+	return pfn + pageblock_nr_pages;
 }
 
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
 {
+	struct page *page = pfn_to_page(pfn);
 	struct zone *zone;
-	unsigned long pfn;
 
 	/*
 	 * We have to be careful here because we are iterating over memory
@@ -1257,12 +1259,14 @@ static bool is_pageblock_removable_nolock(struct page *page)
 /* Checks if this range of memory is likely to be hot-removable. */
 bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 {
-	struct page *page = pfn_to_page(start_pfn);
-	struct page *end_page = page + nr_pages;
+	unsigned long end_pfn, pfn;
+
+	end_pfn = min(start_pfn + nr_pages,
+			zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
 
 	/* Check the starting page of each pageblock within the range */
-	for (; page < end_page; page = next_active_pageblock(page)) {
-		if (!is_pageblock_removable_nolock(page))
+	for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+		if (!is_pageblock_removable_nolock(pfn))
 			return false;
 		cond_resched();
 	}
@@ -1298,6 +1302,9 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
 			i++;
 			if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
 				continue;
+			/* Check if we got outside of the zone */
+			if (zone && !zone_spans_pfn(zone, pfn + i))
+				return 0;
 			page = pfn_to_page(pfn + i);
 			if (zone && page_zone(page) != zone)
 				return 0;
```
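For readers skimming the diff: the backported change converts is_mem_section_removable() and is_pageblock_removable_nolock() from walking struct page pointers to walking page frame numbers, clamps the walk to zone_end_pfn() of the starting pfn's zone, and makes test_pages_in_a_zone() bail out when a pfn falls outside the zone being checked, so neither scan can step past the end of a zone into memmap that may not be valid. Below is a minimal, stand-alone C sketch of that clamped pfn walk, not kernel code: fake_zone, PAGEBLOCK_NR_PAGES, pageblock_removable() and range_removable() are simplified stand-ins for the kernel's struct zone, pageblock_nr_pages, is_pageblock_removable_nolock() and is_mem_section_removable().

```c
/*
 * Stand-alone sketch of the clamped pfn walk introduced by this patch.
 * All names below are simplified stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL          /* stand-in for pageblock_nr_pages */

struct fake_zone {                        /* stand-in for struct zone */
	unsigned long start_pfn;
	unsigned long end_pfn;            /* exclusive, like zone_end_pfn() */
};

/* stand-in for is_pageblock_removable_nolock(); here every block passes */
static bool pageblock_removable(unsigned long pfn)
{
	(void)pfn;
	return true;
}

/*
 * Mirrors the new shape of is_mem_section_removable(): iterate pfns in
 * pageblock steps, but never past the end of the zone owning start_pfn.
 */
static bool range_removable(const struct fake_zone *zone,
			    unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long end_pfn = start_pfn + nr_pages;
	unsigned long pfn;

	if (end_pfn > zone->end_pfn)      /* the clamp added by the patch */
		end_pfn = zone->end_pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGEBLOCK_NR_PAGES) {
		if (!pageblock_removable(pfn))
			return false;
	}
	return true;
}

int main(void)
{
	struct fake_zone zone = { .start_pfn = 0, .end_pfn = 1UL << 20 };

	/* A request that overruns the zone is clamped, not walked past its end. */
	printf("%d\n", range_removable(&zone, (1UL << 20) - 1024, 4096));
	return 0;
}
```

The design point is the same one the sketch shows: compute the end of the walk from the zone rather than from start_pfn + nr_pages alone, so a caller-supplied range that overruns the zone is truncated instead of dereferenced.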