summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMike Rapoport (Microsoft) <rppt@kernel.org>2026-03-23 10:48:33 +0300
committerMike Rapoport (Microsoft) <rppt@kernel.org>2026-04-01 11:20:15 +0300
commit7fbc5e26123e5fee1f0eb59e6fabf5ce4cf4f475 (patch)
tree0bf3e12f97e020d9af82aafe4bca4d6f5ab8b89f
parentb8de9573e6aea8e0be666288ee4427eb07369187 (diff)
downloadlinux-7fbc5e26123e5fee1f0eb59e6fabf5ce4cf4f475.tar.xz
memblock: extract page freeing from free_reserved_area() into a helper
There are two functions that release pages to the buddy allocator late in the boot: free_reserved_area() and memblock_free_late(). Currently they use different underlying functionality: free_reserved_area() runs each page being freed through free_reserved_page(), while memblock_free_late() uses memblock_free_pages() -> __free_pages_core(). In the end, both boil down to a loop that frees a range page by page. Extract the loop that frees pages from free_reserved_area() into a helper and use that helper in memblock_free_late(). Link: https://patch.msgid.link/20260323074836.3653702-7-rppt@kernel.org Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
-rw-r--r--mm/memblock.c55
1 file changed, 29 insertions, 26 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index a42ec6a76ea0..68a72bd4c8bd 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -894,26 +894,12 @@ int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
return memblock_remove_range(&memblock.memory, base, size);
}
-unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
+static unsigned long __free_reserved_area(phys_addr_t start, phys_addr_t end,
+ int poison)
{
- phys_addr_t start_pa, end_pa;
unsigned long pages = 0, pfn;
- /*
- * end is the first address past the region and it may be beyond what
- * __pa() or __pa_symbol() can handle.
- * Use the address included in the range for the conversion and add
- * back 1 afterwards.
- */
- if (__is_kernel((unsigned long)start)) {
- start_pa = __pa_symbol(start);
- end_pa = __pa_symbol(end - 1) + 1;
- } else {
- start_pa = __pa(start);
- end_pa = __pa(end - 1) + 1;
- }
-
- for_each_valid_pfn(pfn, PFN_UP(start_pa), PFN_DOWN(end_pa)) {
+ for_each_valid_pfn(pfn, PFN_UP(start), PFN_DOWN(end)) {
struct page *page = pfn_to_page(pfn);
void *direct_map_addr;
@@ -935,7 +921,29 @@ unsigned long free_reserved_area(void *start, void *end, int poison, const char
free_reserved_page(page);
pages++;
}
+ return pages;
+}
+
+unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
+{
+ phys_addr_t start_pa, end_pa;
+ unsigned long pages;
+
+ /*
+ * end is the first address past the region and it may be beyond what
+ * __pa() or __pa_symbol() can handle.
+ * Use the address included in the range for the conversion and add back
+ * 1 afterwards.
+ */
+ if (__is_kernel((unsigned long)start)) {
+ start_pa = __pa_symbol(start);
+ end_pa = __pa_symbol(end - 1) + 1;
+ } else {
+ start_pa = __pa(start);
+ end_pa = __pa(end - 1) + 1;
+ }
+ pages = __free_reserved_area(start_pa, end_pa, poison);
if (pages && s)
pr_info("Freeing %s memory: %ldK\n", s, K(pages));
@@ -1811,20 +1819,15 @@ void *__init __memblock_alloc_or_panic(phys_addr_t size, phys_addr_t align,
*/
void __init memblock_free_late(phys_addr_t base, phys_addr_t size)
{
- phys_addr_t cursor, end;
+ phys_addr_t end = base + size - 1;
- end = base + size - 1;
memblock_dbg("%s: [%pa-%pa] %pS\n",
__func__, &base, &end, (void *)_RET_IP_);
- kmemleak_free_part_phys(base, size);
- cursor = PFN_UP(base);
- end = PFN_DOWN(base + size);
- for (; cursor < end; cursor++) {
- memblock_free_pages(cursor, 0);
- totalram_pages_inc();
- }
+ kmemleak_free_part_phys(base, size);
+ __free_reserved_area(base, base + size, -1);
}
+
/*
* Remaining API functions
*/