Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--  mm/memory_hotplug.c  232
1 file changed, 146 insertions(+), 86 deletions(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index ab41a511e20a..b3c0ff52bb72 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -645,7 +645,7 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
unsigned long pfn;
/*
- * Online the pages in MAX_ORDER aligned chunks. The callback might
+ * Online the pages in MAX_PAGE_ORDER aligned chunks. The callback might
* decide to not expose all pages to the buddy (e.g., expose them
* later). We account all pages as being online and belonging to this
* zone ("present").
@@ -660,12 +660,13 @@ static void online_pages_range(unsigned long start_pfn, unsigned long nr_pages)
* Free to online pages in the largest chunks alignment allows.
*
* __ffs() behaviour is undefined for 0. start == 0 is
- * MAX_ORDER-aligned, Set order to MAX_ORDER for the case.
+ * MAX_PAGE_ORDER-aligned; set order to MAX_PAGE_ORDER for
+ * the case.
*/
if (pfn)
- order = min_t(int, MAX_ORDER, __ffs(pfn));
+ order = min_t(int, MAX_PAGE_ORDER, __ffs(pfn));
else
- order = MAX_ORDER;
+ order = MAX_PAGE_ORDER;
(*online_page_callback)(pfn_to_page(pfn), order);
pfn += (1UL << order);
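[Editorial aside: the __ffs() logic above picks the largest power-of-two chunk that is both aligned at pfn and capped at MAX_PAGE_ORDER. A minimal userspace sketch of the same arithmetic, using __builtin_ctzl() as a stand-in for the kernel's __ffs() and an assumed MAX_PAGE_ORDER of 10; the constant and the sample range are illustrative, not taken from any particular config:

    #include <stdio.h>

    #define MAX_PAGE_ORDER 10  /* assumed; the real value is per-config */

    static int chunk_order(unsigned long pfn)
    {
        /* __ffs() is undefined for 0, hence the special case. */
        if (!pfn)
            return MAX_PAGE_ORDER;
        return __builtin_ctzl(pfn) < MAX_PAGE_ORDER ?
               __builtin_ctzl(pfn) : MAX_PAGE_ORDER;
    }

    int main(void)
    {
        unsigned long pfn = 0x9000, end = 0x9000 + 0x1800;

        while (pfn < end) {
            int order = chunk_order(pfn);
            printf("online pfn %#lx, order %d\n", pfn, order);
            pfn += 1UL << order;  /* advance by one aligned chunk */
        }
        return 0;
    }
]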
@@ -1129,6 +1130,9 @@ void mhp_deinit_memmap_on_memory(unsigned long pfn, unsigned long nr_pages)
kasan_remove_zero_shadow(__va(PFN_PHYS(pfn)), PFN_PHYS(nr_pages));
}
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
@@ -1149,7 +1153,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
!IS_ALIGNED(pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
- mem_hotplug_begin();
/* associate pfn range with the zone */
move_pfn_range_to_zone(zone, pfn, nr_pages, NULL, MIGRATE_ISOLATE);
@@ -1208,7 +1211,6 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages,
writeback_set_ratelimit();
memory_notify(MEM_ONLINE, &arg);
- mem_hotplug_done();
return 0;
failed_addition:
@@ -1217,7 +1219,6 @@ failed_addition:
(((unsigned long long) pfn + nr_pages) << PAGE_SHIFT) - 1);
memory_notify(MEM_CANCEL_ONLINE, &arg);
remove_pfn_range_from_zone(zone, pfn, nr_pages);
- mem_hotplug_done();
return ret;
}
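[Editorial aside: with mem_hotplug_begin()/mem_hotplug_done() removed from online_pages() itself (and, further down, from offline_pages()), taking mem_hotplug_lock in write mode becomes the caller's job, as the new comment states. A hedged sketch of the resulting caller-side pattern; the real callers live in drivers/base/memory.c, and error handling is elided here:

    /* Illustrative only, not the exact driver code. */
    mem_hotplug_begin();    /* mem_hotplug_lock, write mode */
    ret = online_pages(pfn, nr_pages, zone, group);
    mem_hotplug_done();
]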
@@ -1380,6 +1381,85 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
return arch_supports_memmap_on_memory(vmemmap_size);
}
+static void __ref remove_memory_blocks_and_altmaps(u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+
+ /*
+ * For memmap_on_memory, the altmaps were added on a per-memblock
+ * basis; we have to process each individual memory block.
+ */
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct vmem_altmap *altmap = NULL;
+ struct memory_block *mem;
+
+ mem = find_memory_block(pfn_to_section_nr(PFN_DOWN(cur_start)));
+ if (WARN_ON_ONCE(!mem))
+ continue;
+
+ altmap = mem->altmap;
+ mem->altmap = NULL;
+
+ remove_memory_block_devices(cur_start, memblock_size);
+
+ arch_remove_memory(cur_start, memblock_size, altmap);
+
+ /* Verify that all vmemmap pages have actually been freed. */
+ WARN(altmap->alloc, "Altmap not fully unmapped");
+ kfree(altmap);
+ }
+}
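[Editorial aside: the WARN on altmap->alloc above is meaningful because struct vmem_altmap keeps running accounting for the vmemmap reservation. A hedged paraphrase of the fields this diff touches; see include/linux/memremap.h for the authoritative definition:

    struct vmem_altmap {
        unsigned long base_pfn;  /* first pfn of the backing range */
        unsigned long end_pfn;   /* last pfn usable by this altmap */
        unsigned long free;      /* pages reserved for the self-hosted memmap */
        unsigned long alloc;     /* pages handed out; expected 0 after teardown */
        /* ... remaining fields omitted ... */
    };

arch_remove_memory() returns the vmemmap pages to the altmap, decrementing alloc; a non-zero value at the WARN would mean leaked vmemmap pages.]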
+
+static int create_altmaps_and_memory_blocks(int nid, struct memory_group *group,
+ u64 start, u64 size)
+{
+ unsigned long memblock_size = memory_block_size_bytes();
+ u64 cur_start;
+ int ret;
+
+ for (cur_start = start; cur_start < start + size;
+ cur_start += memblock_size) {
+ struct mhp_params params = { .pgprot =
+ pgprot_mhp(PAGE_KERNEL) };
+ struct vmem_altmap mhp_altmap = {
+ .base_pfn = PHYS_PFN(cur_start),
+ .end_pfn = PHYS_PFN(cur_start + memblock_size - 1),
+ };
+
+ mhp_altmap.free = memory_block_memmap_on_memory_pages();
+ params.altmap = kmemdup(&mhp_altmap, sizeof(struct vmem_altmap),
+ GFP_KERNEL);
+ if (!params.altmap) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ /* call arch's memory hotadd */
+ ret = arch_add_memory(nid, cur_start, memblock_size, &params);
+ if (ret < 0) {
+ kfree(params.altmap);
+ goto out;
+ }
+
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(cur_start, memblock_size,
+ params.altmap, group);
+ if (ret) {
+ arch_remove_memory(cur_start, memblock_size, NULL);
+ kfree(params.altmap);
+ goto out;
+ }
+ }
+
+ return 0;
+out:
+ if (ret && cur_start != start)
+ remove_memory_blocks_and_altmaps(start, cur_start - start);
+ return ret;
+}
+
/*
* NOTE: The caller must call lock_device_hotplug() to serialize hotplug
* and online/offline operations (triggered e.g. by sysfs).
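[Editorial aside: to see what create_altmaps_and_memory_blocks() does to a request, here is a hedged userspace sketch of the carve-up, assuming a 128 MiB memory block size (a common x86-64 default; the kernel queries memory_block_size_bytes()) and a hypothetical 512 MiB hot-add at 4 GiB:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long memblock_size = 128ULL << 20; /* assumed */
        unsigned long long start = 0x100000000ULL;       /* hypothetical */
        unsigned long long size = 512ULL << 20;          /* hypothetical */
        unsigned long long cur;

        /* One altmap and one memory block device per chunk. */
        for (cur = start; cur < start + size; cur += memblock_size)
            printf("altmap for [%#llx, %#llx)\n",
                   cur, cur + memblock_size);
        return 0;
    }

Because every chunk now self-hosts its own memmap, a later removal of any memblock-sized subrange can locate and free its altmap independently, which is what remove_memory_blocks_and_altmaps() above relies on.]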
@@ -1390,10 +1470,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
{
struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
enum memblock_flags memblock_flags = MEMBLOCK_NONE;
- struct vmem_altmap mhp_altmap = {
- .base_pfn = PHYS_PFN(res->start),
- .end_pfn = PHYS_PFN(res->end),
- };
struct memory_group *group = NULL;
u64 start, size;
bool new_node = false;
@@ -1436,30 +1512,22 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
/*
* Self hosted memmap array
*/
- if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
- if (mhp_supports_memmap_on_memory(size)) {
- mhp_altmap.free = memory_block_memmap_on_memory_pages();
- params.altmap = kmalloc(sizeof(struct vmem_altmap), GFP_KERNEL);
- if (!params.altmap) {
- ret = -ENOMEM;
- goto error;
- }
+ if ((mhp_flags & MHP_MEMMAP_ON_MEMORY) &&
+ mhp_supports_memmap_on_memory(memory_block_size_bytes())) {
+ ret = create_altmaps_and_memory_blocks(nid, group, start, size);
+ if (ret)
+ goto error;
+ } else {
+ ret = arch_add_memory(nid, start, size, &params);
+ if (ret < 0)
+ goto error;
- memcpy(params.altmap, &mhp_altmap, sizeof(mhp_altmap));
+ /* create memory block devices after memory was added */
+ ret = create_memory_block_devices(start, size, NULL, group);
+ if (ret) {
+ arch_remove_memory(start, size, params.altmap);
+ goto error;
}
- /* fallback to not using altmap */
- }
-
- /* call arch's memory hotadd */
- ret = arch_add_memory(nid, start, size, &params);
- if (ret < 0)
- goto error_free;
-
- /* create memory block devices after memory was added */
- ret = create_memory_block_devices(start, size, params.altmap, group);
- if (ret) {
- arch_remove_memory(start, size, NULL);
- goto error_free;
}
if (new_node) {
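[Editorial aside: the MHP_MEMMAP_ON_MEMORY path is opted into by callers such as the dax/kmem driver. A hedged sketch of such a request; flag usage and naming are illustrative, see drivers/dax/kmem.c for the real call:

    /* Sketch: hot-add driver-managed memory with a self-hosted memmap. */
    rc = add_memory_driver_managed(nid, res->start, resource_size(res),
                                   "System RAM (kmem)",
                                   MHP_MEMMAP_ON_MEMORY);
    if (rc)
        dev_warn(dev, "memory hot-add failed: %d\n", rc);
]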
@@ -1496,8 +1564,6 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
walk_memory_blocks(start, size, NULL, online_memory_block);
return ret;
-error_free:
- kfree(params.altmap);
error:
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
memblock_remove(start, size);
@@ -1863,6 +1929,9 @@ static int count_system_ram_pages_cb(unsigned long start_pfn,
return 0;
}
+/*
+ * Must be called with mem_hotplug_lock in write mode.
+ */
int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
struct zone *zone, struct memory_group *group)
{
@@ -1885,8 +1954,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
!IS_ALIGNED(start_pfn + nr_pages, PAGES_PER_SECTION)))
return -EINVAL;
- mem_hotplug_begin();
-
/*
* Don't allow to offline memory blocks that contain holes.
* Consequently, memory blocks with holes can never get onlined
@@ -2031,7 +2098,6 @@ int __ref offline_pages(unsigned long start_pfn, unsigned long nr_pages,
memory_notify(MEM_OFFLINE, &arg);
remove_pfn_range_from_zone(zone, start_pfn, nr_pages);
- mem_hotplug_done();
return 0;
failed_removal_isolated:
@@ -2046,7 +2112,6 @@ failed_removal:
(unsigned long long) start_pfn << PAGE_SHIFT,
((unsigned long long) end_pfn << PAGE_SHIFT) - 1,
reason);
- mem_hotplug_done();
return ret;
}
@@ -2068,17 +2133,13 @@ static int check_memblock_offlined_cb(struct memory_block *mem, void *arg)
return 0;
}
-static int test_has_altmap_cb(struct memory_block *mem, void *arg)
+static int count_memory_range_altmaps_cb(struct memory_block *mem, void *arg)
{
- struct memory_block **mem_ptr = (struct memory_block **)arg;
- /*
- * return the memblock if we have altmap
- * and break callback.
- */
- if (mem->altmap) {
- *mem_ptr = mem;
- return 1;
- }
+ u64 *num_altmaps = (u64 *)arg;
+
+ if (mem->altmap)
+ *num_altmaps += 1;
+
return 0;
}
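[Editorial aside: the rework leans on the walk_memory_blocks() contract that a non-zero return from the callback stops the walk early. That is why the old test_has_altmap_cb() returned 1 at the first altmap, while the new counter always returns 0 so every block in the range is visited. For contrast, a hypothetical early-stopping callback in the old style:

    /* Hypothetical: stop the walk at the first offline block. */
    static int has_offline_block_cb(struct memory_block *mem, void *arg)
    {
        bool *found = arg;

        if (mem->state == MEM_OFFLINE) {
            *found = true;
            return 1;   /* non-zero ends walk_memory_blocks() */
        }
        return 0;
    }
]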
@@ -2152,11 +2213,29 @@ void try_offline_node(int nid)
}
EXPORT_SYMBOL(try_offline_node);
+static int memory_blocks_have_altmaps(u64 start, u64 size)
+{
+ u64 num_memblocks = size / memory_block_size_bytes();
+ u64 num_altmaps = 0;
+
+ if (!mhp_memmap_on_memory())
+ return 0;
+
+ walk_memory_blocks(start, size, &num_altmaps,
+ count_memory_range_altmaps_cb);
+
+ if (num_altmaps == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(num_memblocks != num_altmaps))
+ return -EINVAL;
+
+ return 1;
+}
+
static int __ref try_remove_memory(u64 start, u64 size)
{
- struct memory_block *mem;
- int rc = 0, nid = NUMA_NO_NODE;
- struct vmem_altmap *altmap = NULL;
+ int rc, nid = NUMA_NO_NODE;
BUG_ON(check_hotplug_memory_range(start, size));
@@ -2173,45 +2252,26 @@ static int __ref try_remove_memory(u64 start, u64 size)
if (rc)
return rc;
- /*
- * We only support removing memory added with MHP_MEMMAP_ON_MEMORY in
- * the same granularity it was added - a single memory block.
- */
- if (mhp_memmap_on_memory()) {
- rc = walk_memory_blocks(start, size, &mem, test_has_altmap_cb);
- if (rc) {
- if (size != memory_block_size_bytes()) {
- pr_warn("Refuse to remove %#llx - %#llx,"
- "wrong granularity\n",
- start, start + size);
- return -EINVAL;
- }
- altmap = mem->altmap;
- /*
- * Mark altmap NULL so that we can add a debug
- * check on memblock free.
- */
- mem->altmap = NULL;
- }
- }
-
/* remove memmap entry */
firmware_map_remove(start, start + size, "System RAM");
- /*
- * Memory block device removal under the device_hotplug_lock is
- * a barrier against racing online attempts.
- */
- remove_memory_block_devices(start, size);
-
mem_hotplug_begin();
- arch_remove_memory(start, size, altmap);
-
- /* Verify that all vmemmap pages have actually been freed. */
- if (altmap) {
- WARN(altmap->alloc, "Altmap not fully unmapped");
- kfree(altmap);
+ rc = memory_blocks_have_altmaps(start, size);
+ if (rc < 0) {
+ mem_hotplug_done();
+ return rc;
+ } else if (!rc) {
+ /*
+ * Memory block device removal under the device_hotplug_lock is
+ * a barrier against racing online attempts.
+ * No altmaps present; do the removal directly.
+ */
+ remove_memory_block_devices(start, size);
+ arch_remove_memory(start, size, NULL);
+ } else {
+ /* all memblocks in the range have altmaps */
+ remove_memory_blocks_and_altmaps(start, size);
}
if (IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK)) {