author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-01-11 01:56:04 +0300
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2020-01-11 01:56:04 +0300
commit     1bdd3e05a0a3b4a97ea88bc46fef8fb265c8b94c (patch)
tree       2244894a9ea0c941a8f32e5f3d196b4ea0eae24b /mm/sparse.c
parent     643dd7416649bea2e8c61d8fdeeefb409a0ca5eb (diff)
parent     c79f46a282390e0f5b306007bf7b11a46d529538 (diff)
download   linux-1bdd3e05a0a3b4a97ea88bc46fef8fb265c8b94c.tar.xz
Merge tag 'v5.5-rc5' into next
Sync up with mainline to get SPI "delay" API changes.
Diffstat (limited to 'mm/sparse.c')
-rw-r--r--  mm/sparse.c  18
1 file changed, 10 insertions(+), 8 deletions(-)
diff --git a/mm/sparse.c b/mm/sparse.c
index f6891c1992b1..b20ab7cdac86 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -458,8 +458,7 @@ struct page __init *__populate_section_memmap(unsigned long pfn,
 	if (map)
 		return map;
 
-	map = memblock_alloc_try_nid(size,
-				     PAGE_SIZE, addr,
+	map = memblock_alloc_try_nid_raw(size, size, addr,
 					  MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	if (!map)
 		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
@@ -482,10 +481,13 @@ static void __init sparse_buffer_init(unsigned long size, int nid)
 {
 	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
 
 	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
-	sparsemap_buf =
-		memblock_alloc_try_nid_raw(size, PAGE_SIZE,
-						addr,
-						MEMBLOCK_ALLOC_ACCESSIBLE, nid);
+	/*
+	 * Pre-allocated buffer is mainly used by __populate_section_memmap
+	 * and we want it to be properly aligned to the section size - this is
+	 * especially the case for VMEMMAP which maps memmap to PMDs
+	 */
+	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
+						addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
 	sparsemap_buf_end = sparsemap_buf + size;
 }
 
@@ -647,7 +649,7 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 #endif
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
-static struct page *populate_section_memmap(unsigned long pfn,
+static struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
@@ -669,7 +671,7 @@ static void free_map_bootmem(struct page *memmap)
 	vmemmap_free(start, end, NULL);
 }
 #else
-struct page *populate_section_memmap(unsigned long pfn,
+struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
 {
 	struct page *page, *ret;
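For context, the reasoning in the new comment in sparse_buffer_init() can be made concrete with a small sketch. The following is a minimal userspace C program, not kernel code; the SECTION_SIZE_BITS, PAGE_SHIFT, sizeof(struct page) and PMD_SIZE values are typical x86-64 figures assumed purely for illustration. It shows why aligning the sparsemap buffer to the per-section memmap size (rather than just PAGE_SIZE) keeps each section's memmap chunk mappable by a single PMD under SPARSEMEM_VMEMMAP:

/*
 * Illustrative userspace sketch, NOT kernel code.  Constants are
 * typical x86-64 values, assumed here only to make the arithmetic
 * concrete.
 */
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE_BITS 27                      /* 128 MiB memory sections */
#define PAGE_SHIFT        12                      /* 4 KiB base pages */
#define PAGES_PER_SECTION (1UL << (SECTION_SIZE_BITS - PAGE_SHIFT))
#define STRUCT_PAGE_SIZE  64UL                    /* sizeof(struct page) */
#define PMD_SIZE          (2UL * 1024 * 1024)     /* 2 MiB huge mapping */

int main(void)
{
	/* memmap for one section: 32768 struct pages * 64 bytes = 2 MiB */
	uint64_t map_size = PAGES_PER_SECTION * STRUCT_PAGE_SIZE;

	printf("per-section memmap size: %llu bytes\n",
	       (unsigned long long)map_size);

	/*
	 * A buffer that is only PAGE_SIZE-aligned can start mid-PMD, so a
	 * section's memmap may straddle a PMD boundary and the vmemmap has
	 * to fall back to base-page mappings.  Aligning the buffer to the
	 * per-section memmap size keeps every section's chunk PMD-mappable;
	 * with these values map_size equals PMD_SIZE exactly.
	 */
	printf("map_size %% PMD_SIZE = %llu (0 => PMD-mappable)\n",
	       (unsigned long long)(map_size % PMD_SIZE));
	return 0;
}

With these assumed constants the per-section memmap is exactly one PMD, which is why the patch passes section_map_size() as the alignment argument to memblock_alloc_exact_nid_raw().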