author | Matthew Wilcox (Oracle) <willy@infradead.org> | 2020-10-16 06:10:15 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2020-10-16 21:11:19 +0300
commit | ab130f9108dcf2062a44f9f0706824ef2e30492e (patch)
tree | 07e3283496e2772048141c882b1ef633364dec0f /mm/page_alloc.c
parent | 1f0f8c0de09066d23760c1f5fac2cd53b32f1127 (diff)
download | linux-ab130f9108dcf2062a44f9f0706824ef2e30492e.tar.xz
mm: rename page_order() to buddy_order()
The current page_order() can only be called on pages in the buddy
allocator. For compound pages, you have to use compound_order(). This is
confusing and led to a bug, so rename page_order() to buddy_order().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20201001152259.14932-2-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
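For context, here is a rough sketch of the two helpers the commit message contrasts, approximately as they appear around this release (buddy_order() in mm/internal.h, compound_order() in include/linux/mm.h). This is an illustration of the distinction, not the patched source verbatim:

```c
/*
 * Sketch, approximating the kernel helpers around v5.10.
 *
 * buddy_order() is only valid for a page that is currently free in the
 * buddy allocator: set_buddy_order() stores the order in page->private
 * and sets PageBuddy, so the caller must check PageBuddy() (normally
 * under zone->lock) before reading it.
 */
static inline unsigned int buddy_order(struct page *page)
{
	/* PageBuddy() must be checked by the caller */
	return page_private(page);
}

/*
 * compound_order() is the helper for allocated compound pages (THP,
 * hugetlb, high-order allocations): the order lives in the first tail page.
 */
static inline unsigned int compound_order(struct page *page)
{
	if (!PageHead(page))
		return 0;
	return page[1].compound_order;
}
```

Calling the old page_order() on a page that is not in the buddy free lists simply returned whatever happened to be stored in page->private, which is the kind of confusion the rename is meant to make obvious at the call site.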
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b032dac62e6..ccf615c0627e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -792,7 +792,7 @@ static inline void clear_page_guard(struct zone *zone, struct page *page,
 				unsigned int order, int migratetype) {}
 #endif
 
-static inline void set_page_order(struct page *page, unsigned int order)
+static inline void set_buddy_order(struct page *page, unsigned int order)
 {
 	set_page_private(page, order);
 	__SetPageBuddy(page);
@@ -817,7 +817,7 @@ static inline bool page_is_buddy(struct page *page, struct page *buddy,
 	if (!page_is_guard(buddy) && !PageBuddy(buddy))
 		return false;
 
-	if (page_order(buddy) != order)
+	if (buddy_order(buddy) != order)
 		return false;
 
 	/*
@@ -1059,7 +1059,7 @@ continue_merging:
 	}
 
 done_merging:
-	set_page_order(page, order);
+	set_buddy_order(page, order);
 
 	if (fpi_flags & FPI_TO_TAIL)
 		to_tail = true;
@@ -2178,7 +2178,7 @@ static inline void expand(struct zone *zone, struct page *page,
 			continue;
 
 		add_to_free_list(&page[size], zone, high, migratetype);
-		set_page_order(&page[size], high);
+		set_buddy_order(&page[size], high);
 	}
 }
 
@@ -2392,7 +2392,7 @@ static int move_freepages(struct zone *zone,
 		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
 		VM_BUG_ON_PAGE(page_zone(page) != zone, page);
 
-		order = page_order(page);
+		order = buddy_order(page);
 		move_to_free_list(page, zone, order, migratetype);
 		page += 1 << order;
 		pages_moved += 1 << order;
@@ -2516,7 +2516,7 @@ static inline void boost_watermark(struct zone *zone)
 static void steal_suitable_fallback(struct zone *zone, struct page *page,
 		unsigned int alloc_flags, int start_type, bool whole_block)
 {
-	unsigned int current_order = page_order(page);
+	unsigned int current_order = buddy_order(page);
 	int free_pages, movable_pages, alike_pages;
 	int old_block_type;
 
@@ -8344,7 +8344,7 @@ struct page *has_unmovable_pages(struct zone *zone, struct page *page,
 		 */
 		if (!page_ref_count(page)) {
 			if (PageBuddy(page))
-				iter += (1 << page_order(page)) - 1;
+				iter += (1 << buddy_order(page)) - 1;
 			continue;
 		}
 
@@ -8557,7 +8557,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
 	}
 
 	if (outer_start != start) {
-		order = page_order(pfn_to_page(outer_start));
+		order = buddy_order(pfn_to_page(outer_start));
 
 		/*
 		 * outer_start page could be small order buddy page and
@@ -8782,7 +8782,7 @@ void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 
 		BUG_ON(page_count(page));
 		BUG_ON(!PageBuddy(page));
-		order = page_order(page);
+		order = buddy_order(page);
 		del_page_from_free_list(page, zone, order);
 		pfn += (1 << order);
 	}
@@ -8801,7 +8801,7 @@ bool is_free_buddy_page(struct page *page)
 	for (order = 0; order < MAX_ORDER; order++) {
 		struct page *page_head = page - (pfn & ((1 << order) - 1));
 
-		if (PageBuddy(page_head) && page_order(page_head) >= order)
+		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
 			break;
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
@@ -8838,7 +8838,7 @@ static void break_down_buddy_pages(struct zone *zone, struct page *page,
 		if (current_buddy != target) {
 			add_to_free_list(current_buddy, zone, high,
 						migratetype);
-			set_page_order(current_buddy, high);
+			set_buddy_order(current_buddy, high);
 			page = next_page;
 		}
 	}
@@ -8858,16 +8858,16 @@ bool take_page_off_buddy(struct page *page)
 	spin_lock_irqsave(&zone->lock, flags);
 	for (order = 0; order < MAX_ORDER; order++) {
 		struct page *page_head = page - (pfn & ((1 << order) - 1));
-		int buddy_order = page_order(page_head);
+		int page_order = buddy_order(page_head);
 
-		if (PageBuddy(page_head) && buddy_order >= order) {
+		if (PageBuddy(page_head) && page_order >= order) {
 			unsigned long pfn_head = page_to_pfn(page_head);
 			int migratetype = get_pfnblock_migratetype(page_head,
 								   pfn_head);
 
-			del_page_from_free_list(page_head, zone, buddy_order);
+			del_page_from_free_list(page_head, zone, page_order);
 			break_down_buddy_pages(zone, page_head, page, 0,
-						buddy_order, migratetype);
+						page_order, migratetype);
 			ret = true;
 			break;
 		}
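Note that the last hunk also flips the local variable in take_page_off_buddy() from buddy_order to page_order so it no longer collides with the renamed helper. As a usage illustration of the convention the helper relies on, mirroring the is_free_buddy_page() hunk above, a caller checks PageBuddy() under zone->lock before trusting the stored order. The sketch below is hypothetical; page_is_free_at_order() is an invented name, not a kernel function:

```c
/* Hypothetical illustration only, not part of the patch. */
static bool page_is_free_at_order(struct page *page, unsigned int order)
{
	struct zone *zone = page_zone(page);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&zone->lock, flags);
	/* buddy_order() is only meaningful while PageBuddy() is set. */
	ret = PageBuddy(page) && buddy_order(page) >= order;
	spin_unlock_irqrestore(&zone->lock, flags);

	return ret;
}
```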