Diffstat (limited to 'mm/memblock.c')
 mm/memblock.c | 254 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 175 insertions(+), 79 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index cc16d70b8333..237944479d25 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -27,6 +27,61 @@
 
 #include "internal.h"
 
+/**
+ * DOC: memblock overview
+ *
+ * Memblock is a method of managing memory regions during the early
+ * boot period when the usual kernel memory allocators are not up and
+ * running.
+ *
+ * Memblock views the system memory as collections of contiguous
+ * regions. There are several types of these collections:
+ *
+ * * ``memory`` - describes the physical memory available to the
+ *   kernel; this may differ from the actual physical memory installed
+ *   in the system, for instance when the memory is restricted with
+ *   the ``mem=`` command line parameter
+ * * ``reserved`` - describes the regions that were allocated
+ * * ``physmap`` - describes the actual physical memory regardless of
+ *   the possible restrictions; the ``physmap`` type is only available
+ *   on some architectures.
+ *
+ * Each region is represented by :c:type:`struct memblock_region` that
+ * defines the region extents, its attributes and NUMA node id on NUMA
+ * systems. Every memory type is described by the :c:type:`struct
+ * memblock_type` which contains an array of memory regions along with
+ * the allocator metadata. The memory types are nicely wrapped with
+ * :c:type:`struct memblock`. This structure is statically initialized
+ * at build time. The region arrays for the "memory" and "reserved"
+ * types are initially sized to %INIT_MEMBLOCK_REGIONS and for the
+ * "physmap" type to %INIT_PHYSMEM_REGIONS.
+ * The :c:func:`memblock_allow_resize` enables automatic resizing of
+ * the region arrays during addition of new regions. This feature
+ * should be used with care so that memory allocated for the region
+ * array will not overlap with areas that should be reserved, for
+ * example initrd.
+ *
+ * The early architecture setup should tell memblock what the physical
+ * memory layout is by using :c:func:`memblock_add` or
+ * :c:func:`memblock_add_node` functions. The first function does not
+ * assign the region to a NUMA node and it is appropriate for UMA
+ * systems. Yet, it is possible to use it on NUMA systems as well and
+ * assign the region to a NUMA node later in the setup process using
+ * :c:func:`memblock_set_node`. The :c:func:`memblock_add_node`
+ * performs such an assignment directly.
+ *
+ * Once memblock is set up, the memory can be allocated using either
+ * the memblock or the bootmem APIs.
+ *
+ * As the system boot progresses, the architecture specific
+ * :c:func:`mem_init` function frees all the memory to the buddy page
+ * allocator.
+ *
+ * If an architecture enables %CONFIG_ARCH_DISCARD_MEMBLOCK, the
+ * memblock data structures will be discarded after the system
+ * initialization completes.
+ */
+
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 #ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
@@ -61,7 +116,7 @@ static int memblock_can_resize __initdata_memblock;
 static int memblock_memory_in_slab __initdata_memblock = 0;
 static int memblock_reserved_in_slab __initdata_memblock = 0;
 
-ulong __init_memblock choose_memblock_flags(void)
+enum memblock_flags __init_memblock choose_memblock_flags(void)
 {
 	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
 }
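The registration flow described in the new DOC block looks roughly like the sketch below. This is not part of the patch: the function name, the addresses, and the bank layout are hypothetical; only memblock_add(), memblock_add_node(), memblock_reserve() and memblock_allow_resize() are the real API, and the exact kernel-image symbols vary by architecture.

#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical early setup, e.g. called from an arch's setup_arch(). */
static void __init example_early_mem_init(void)
{
	/* Describe the physical memory reported by the firmware. */
	memblock_add(0x80000000, SZ_256M);		/* bank 0, no node */
	memblock_add_node(0xc0000000, SZ_256M, 1);	/* bank 1 on node 1 */

	/* Carve out ranges that must never be handed to the allocator. */
	memblock_reserve(__pa_symbol(_stext), _end - _stext); /* kernel image */

	/*
	 * Only after initrd and friends are reserved is it safe to let
	 * the region arrays be reallocated on demand.
	 */
	memblock_allow_resize();
}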
@@ -93,10 +148,11 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
 	return i < type->cnt;
 }
 
-/*
+/**
  * __memblock_find_range_bottom_up - find free area utility in bottom-up
  * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ *       %MEMBLOCK_ALLOC_ACCESSIBLE
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@@ -104,13 +160,13 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
  *
  * Utility called from memblock_find_in_range_node(), find free area bottom-up.
  *
- * RETURNS:
+ * Return:
  * Found address on success, 0 on failure.
  */
 static phys_addr_t __init_memblock
 __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
 				phys_addr_t size, phys_addr_t align, int nid,
-				ulong flags)
+				enum memblock_flags flags)
 {
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
@@ -130,7 +186,8 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
 /**
  * __memblock_find_range_top_down - find free area utility, in top-down
  * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ *       %MEMBLOCK_ALLOC_ACCESSIBLE
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@@ -138,13 +195,13 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
  *
  * Utility called from memblock_find_in_range_node(), find free area top-down.
  *
- * RETURNS:
+ * Return:
  * Found address on success, 0 on failure.
  */
 static phys_addr_t __init_memblock
 __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
 			       phys_addr_t size, phys_addr_t align, int nid,
-			       ulong flags)
+			       enum memblock_flags flags)
 {
 	phys_addr_t this_start, this_end, cand;
 	u64 i;
@@ -170,7 +227,8 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  * @size: size of free area to find
  * @align: alignment of free area to find
  * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ *       %MEMBLOCK_ALLOC_ACCESSIBLE
  * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
  * @flags: pick from blocks based on memory attributes
  *
@@ -184,12 +242,13 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
  *
  * If bottom-up allocation failed, will try to allocate memory top-down.
  *
- * RETURNS:
+ * Return:
  * Found address on success, 0 on failure.
  */
 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid, ulong flags)
+					phys_addr_t end, int nid,
+					enum memblock_flags flags)
 {
 	phys_addr_t kernel_end, ret;
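A usage sketch for the finder documented above (illustrative, not from the patch): the return convention is the documented "found address on success, 0 on failure", and the found range still has to be reserved by the caller.

#include <linux/memblock.h>
#include <linux/sizes.h>

/* Hypothetical caller: claim a free 16 MiB window below 4 GiB. */
static int __init example_claim_low_window(void)
{
	phys_addr_t base;

	base = memblock_find_in_range(0, SZ_4G, SZ_16M, SMP_CACHE_BYTES);
	if (!base)		/* 0 means no suitable free range */
		return -ENOMEM;

	/* The finder only searches; the range must be claimed explicitly. */
	memblock_reserve(base, SZ_16M);
	return 0;
}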
@@ -228,7 +287,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 		 * so we use WARN_ONCE() here to see the stack trace if
 		 * fail happens.
 		 */
-		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+		WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+			  "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
 	}
 
 	return __memblock_find_range_top_down(start, end, size, align, nid,
@@ -238,13 +298,14 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
 /**
  * memblock_find_in_range - find free area in given range
  * @start: start of candidate range
- * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
+ *       %MEMBLOCK_ALLOC_ACCESSIBLE
  * @size: size of free area to find
  * @align: alignment of free area to find
  *
  * Find @size free area aligned to @align in the specified range.
  *
- * RETURNS:
+ * Return:
  * Found address on success, 0 on failure.
  */
 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
@@ -252,7 +313,7 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t align)
 {
 	phys_addr_t ret;
-	ulong flags = choose_memblock_flags();
+	enum memblock_flags flags = choose_memblock_flags();
 
 again:
 	ret = memblock_find_in_range_node(size, align, start, end,
@@ -288,7 +349,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 
 #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
 /**
- * Discard memory and reserved arrays if they were allocated
+ * memblock_discard - discard memory and reserved arrays if they were allocated
  */
 void __init memblock_discard(void)
 {
@@ -318,11 +379,11 @@ void __init memblock_discard(void)
  *
  * Double the size of the @type regions array. If memblock is being used to
  * allocate memory for a new reserved regions array and there is a previously
- * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
+ * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
  * waiting to be reserved, ensure the memory used by the new array does
  * not overlap.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -1 on failure.
  */
 static int __init_memblock memblock_double_array(struct memblock_type *type,
@@ -331,7 +392,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 {
 	struct memblock_region *new_array, *old_array;
 	phys_addr_t old_alloc_size, new_alloc_size;
-	phys_addr_t old_size, new_size, addr;
+	phys_addr_t old_size, new_size, addr, new_end;
 	int use_slab = slab_is_available();
 	int *in_slab;
 
@@ -392,9 +453,9 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 		return -1;
 	}
 
-	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
-			type->name, type->max * 2, (u64)addr,
-			(u64)addr + new_size - 1);
+	new_end = addr + new_size - 1;
+	memblock_dbg("memblock: %s is doubled to %ld at [%pa-%pa]",
+			type->name, type->max * 2, &addr, &new_end);
 
 	/*
 	 * Found space, we now need to move the array over before we add the
@@ -467,13 +528,14 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
  * @nid: node id of the new region
  * @flags: flags of the new region
  *
- * Insert new memblock region [@base,@base+@size) into @type at @idx.
+ * Insert new memblock region [@base, @base + @size) into @type at @idx.
  * @type must already have extra room to accommodate the new region.
  */
 static void __init_memblock memblock_insert_region(struct memblock_type *type,
 						   int idx, phys_addr_t base,
 						   phys_addr_t size,
-						   int nid, unsigned long flags)
+						   int nid,
+						   enum memblock_flags flags)
 {
 	struct memblock_region *rgn = &type->regions[idx];
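The memblock_dbg() change in the hunk above switches to the %pa printk specifier, which exists precisely for phys_addr_t: its width differs between 32-bit and 64-bit configurations, and %pa prints it at the right width while taking a pointer to the value rather than the value itself. A minimal illustration (not from the patch):

	phys_addr_t start = 0x80000000;
	phys_addr_t end = start + SZ_1M - 1;

	/* Correct: pass pointers, let %pa pick the right width. */
	pr_info("region: [%pa-%pa]\n", &start, &end);

This is also why the patch introduces the new_end local: %pa needs an lvalue, so the expression addr + new_size - 1 cannot be passed by address directly.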
@@ -495,17 +557,17 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
  * @nid: nid of the new region
  * @flags: flags of the new region
  *
- * Add new memblock region [@base,@base+@size) into @type. The new region
+ * Add new memblock region [@base, @base + @size) into @type. The new region
  * is allowed to overlap with existing ones - overlaps don't affect already
  * existing regions. @type is guaranteed to be minimal (all neighbouring
  * compatible regions are merged) after the addition.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int __init_memblock memblock_add_range(struct memblock_type *type,
 				phys_addr_t base, phys_addr_t size,
-				int nid, unsigned long flags)
+				int nid, enum memblock_flags flags)
 {
 	bool insert = false;
 	phys_addr_t obase = base;
@@ -589,12 +651,35 @@ repeat:
 	}
 }
 
+/**
+ * memblock_add_node - add new memblock region within a NUMA node
+ * @base: base address of the new region
+ * @size: size of the new region
+ * @nid: nid of the new region
+ *
+ * Add new memblock region [@base, @base + @size) to the "memory"
+ * type. See memblock_add_range() description for more details.
+ *
+ * Return:
+ * 0 on success, -errno on failure.
+ */
 int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
 				       int nid)
 {
 	return memblock_add_range(&memblock.memory, base, size, nid, 0);
 }
 
+/**
+ * memblock_add - add new memblock region
+ * @base: base address of the new region
+ * @size: size of the new region
+ *
+ * Add new memblock region [@base, @base + @size) to the "memory"
+ * type. See memblock_add_range() description for more details.
+ *
+ * Return:
+ * 0 on success, -errno on failure.
+ */
 int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
 {
 	phys_addr_t end = base + size - 1;
@@ -614,11 +699,11 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
  * @end_rgn: out parameter for the end of isolated region
  *
  * Walk @type and ensure that regions don't cross the boundaries defined by
- * [@base,@base+@size). Crossing regions are split at the boundaries,
+ * [@base, @base + @size). Crossing regions are split at the boundaries,
  * which may create at most two more regions. The index of the first
  * region inside the range is returned in *@start_rgn and end in *@end_rgn.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 static int __init_memblock memblock_isolate_range(struct memblock_type *type,
@@ -729,10 +814,15 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
 }
 
 /**
+ * memblock_setclr_flag - set or clear flag for a memory region
+ * @base: base address of the region
+ * @size: size of the region
+ * @set: set or clear the flag
+ * @flag: the flag to update
  *
  * This function isolates region [@base, @base + @size), and sets/clears flag
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 static int __init_memblock memblock_setclr_flag(phys_addr_t base,
 				phys_addr_t size, int set, int flag)
@@ -759,7 +849,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
 {
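A hedged sketch of the flag helpers in use; the addresses are made up, while the calls themselves are the real API from this file:

	/* A hot-removable DIMM: with movable_node, bottom-up
	 * allocations will steer clear of this range. */
	memblock_mark_hotplug(0x100000000ULL, SZ_1G);

	/* A firmware-owned range that must stay out of the linear map. */
	memblock_mark_nomap(0xfe000000, SZ_16M);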
@@ -771,7 +861,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
 {
@@ -783,7 +873,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
 {
@@ -797,7 +887,7 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
 {
@@ -809,7 +899,7 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
  * @base: the base phys addr of the region
  * @size: the size of the region
  *
- * Return 0 on success, -errno on failure.
+ * Return: 0 on success, -errno on failure.
  */
 int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
 {
@@ -874,7 +964,8 @@ void __init_memblock __next_reserved_mem_region(u64 *idx,
  * As both region arrays are sorted, the function advances the two indices
  * in lockstep and returns each intersection.
  */
-void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
+void __init_memblock __next_mem_range(u64 *idx, int nid,
+				      enum memblock_flags flags,
 				      struct memblock_type *type_a,
 				      struct memblock_type *type_b,
 				      phys_addr_t *out_start,
@@ -969,9 +1060,6 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
 /**
  * __next_mem_range_rev - generic next function for for_each_*_range_rev()
  *
- * Finds the next range from type_a which is not marked as unsuitable
- * in type_b.
- *
  * @idx: pointer to u64 loop variable
  * @nid: node selector, %NUMA_NO_NODE for all nodes
  * @flags: pick from blocks based on memory attributes
@@ -981,9 +1069,13 @@ void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
  * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
  * @out_nid: ptr to int for nid of the range, can be %NULL
  *
+ * Finds the next range from type_a which is not marked as unsuitable
+ * in type_b.
+ *
  * Reverse of __next_mem_range().
  */
-void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
+					  enum memblock_flags flags,
 					  struct memblock_type *type_a,
 					  struct memblock_type *type_b,
 					  phys_addr_t *out_start,
@@ -1115,10 +1207,10 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
  * @type: memblock type to set node ID for
  * @nid: node ID to set
  *
- * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
+ * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
  * Regions which cross the area boundaries are split as necessary.
  *
- * RETURNS:
+ * Return:
  * 0 on success, -errno on failure.
  */
 int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
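__next_mem_range() and __next_mem_range_rev() are almost never called directly; they back iterator macros such as for_each_free_mem_range() from <linux/memblock.h>. A minimal sketch of walking every free range (the loop body is illustrative):

	phys_addr_t start, end;
	u64 i;

	/* Ranges present in "memory" but not covered by "reserved". */
	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
				&start, &end, NULL)
		pr_info("free: [%pa-%pa)\n", &start, &end);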
@@ -1141,7 +1233,8 @@ int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
 
 static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t start,
-					phys_addr_t end, int nid, ulong flags)
+					phys_addr_t end, int nid,
+					enum memblock_flags flags)
 {
 	phys_addr_t found;
 
@@ -1163,7 +1256,7 @@ static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
 
 phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
 					phys_addr_t start, phys_addr_t end,
-					ulong flags)
+					enum memblock_flags flags)
 {
 	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
 					flags);
@@ -1171,14 +1264,14 @@ phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
 
 phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
 					phys_addr_t align, phys_addr_t max_addr,
-					int nid, ulong flags)
+					int nid, enum memblock_flags flags)
 {
 	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
 }
 
 phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
 {
-	ulong flags = choose_memblock_flags();
+	enum memblock_flags flags = choose_memblock_flags();
 	phys_addr_t ret;
 
 again:
@@ -1225,6 +1318,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
 	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
 }
 
+#if defined(CONFIG_NO_BOOTMEM)
 /**
  * memblock_virt_alloc_internal - allocate boot memory block
  * @size: size of memory block to be allocated in bytes
@@ -1241,7 +1335,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
  * The allocation is performed from memory region limited by
  * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
  *
- * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
+ * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
  *
  * The phys address of allocated boot memory block is converted to virtual and
  * allocated memory is reset to 0.
@@ -1249,7 +1343,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
  * In addition, function sets the min_count to 0 using kmemleak_alloc for
  * allocated boot memory block, so that it is never reported as leaks.
  *
- * RETURNS:
+ * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
  */
 static void * __init memblock_virt_alloc_internal(
@@ -1259,7 +1353,7 @@ static void * __init memblock_virt_alloc_internal(
 {
 	phys_addr_t alloc;
 	void *ptr;
-	ulong flags = choose_memblock_flags();
+	enum memblock_flags flags = choose_memblock_flags();
 
 	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
 		nid = NUMA_NO_NODE;
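The physical allocators above return a phys_addr_t, with 0 signalling failure. A sketch (the range and the purpose are illustrative, not from the patch) of grabbing one cache-aligned page below 16 MiB:

	phys_addr_t pa;

	pa = memblock_alloc_range(PAGE_SIZE, SMP_CACHE_BYTES,
				  0, SZ_16M, MEMBLOCK_NONE);
	if (!pa)
		panic("cannot allocate a low scratch page\n");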
@@ -1334,7 +1428,7 @@ done:
  * info), if enabled. Does not zero allocated memory, does not panic if request
  * cannot be satisfied.
  *
- * RETURNS:
+ * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
  */
 void * __init memblock_virt_alloc_try_nid_raw(
@@ -1344,9 +1438,9 @@ void * __init memblock_virt_alloc_try_nid_raw(
 {
 	void *ptr;
 
-	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
-		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
-		     (u64)max_addr, (void *)_RET_IP_);
+	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
+		     __func__, (u64)size, (u64)align, nid, &min_addr,
+		     &max_addr, (void *)_RET_IP_);
 
 	ptr = memblock_virt_alloc_internal(size, align,
 					   min_addr, max_addr, nid);
@@ -1371,7 +1465,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
  * Public function, provides additional debug information (including caller
  * info), if enabled. This function zeroes the allocated memory.
  *
- * RETURNS:
+ * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
  */
 void * __init memblock_virt_alloc_try_nid_nopanic(
@@ -1381,9 +1475,9 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
 {
 	void *ptr;
 
-	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
-		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
-		     (u64)max_addr, (void *)_RET_IP_);
+	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
+		     __func__, (u64)size, (u64)align, nid, &min_addr,
+		     &max_addr, (void *)_RET_IP_);
 
 	ptr = memblock_virt_alloc_internal(size, align,
 					   min_addr, max_addr, nid);
@@ -1407,7 +1501,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
  * which provides debug information (including caller info), if enabled,
  * and panics if the request can not be satisfied.
  *
- * RETURNS:
+ * Return:
  * Virtual address of allocated memory block on success, NULL on failure.
  */
 void * __init memblock_virt_alloc_try_nid(
@@ -1417,9 +1511,9 @@ void * __init memblock_virt_alloc_try_nid(
 {
 	void *ptr;
 
-	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
-		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
-		     (u64)max_addr, (void *)_RET_IP_);
+	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa %pF\n",
+		     __func__, (u64)size, (u64)align, nid, &min_addr,
+		     &max_addr, (void *)_RET_IP_);
 	ptr = memblock_virt_alloc_internal(size, align,
 					   min_addr, max_addr, nid);
 	if (ptr) {
@@ -1427,11 +1521,11 @@ void * __init memblock_virt_alloc_try_nid(
 		return ptr;
 	}
 
-	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
-	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
-	      (u64)max_addr);
+	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=%pa max_addr=%pa\n",
+	      __func__, (u64)size, (u64)align, nid, &min_addr, &max_addr);
 	return NULL;
 }
+#endif
 
 /**
  * __memblock_free_early - free boot memory block
@@ -1443,16 +1537,17 @@ void * __init memblock_virt_alloc_try_nid(
  */
 void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
 {
-	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
-		     __func__, (u64)base, (u64)base + size - 1,
-		     (void *)_RET_IP_);
+	phys_addr_t end = base + size - 1;
+
+	memblock_dbg("%s: [%pa-%pa] %pF\n",
+		     __func__, &base, &end, (void *)_RET_IP_);
 	kmemleak_free_part_phys(base, size);
 	memblock_remove_range(&memblock.reserved, base, size);
 }
 
-/*
+/**
  * __memblock_free_late - free bootmem block pages directly to buddy allocator
- * @addr: phys starting address of the boot memory block
+ * @base: phys starting address of the boot memory block
  * @size: size of the boot memory block in bytes
  *
  * This is only useful when the bootmem allocator has already been torn
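The three wrappers around memblock_virt_alloc_internal() differ only in policy: the _raw variant returns uninitialized memory and NULL on failure, _nopanic zeroes the block but may still return NULL, and memblock_virt_alloc_try_nid() zeroes and panics instead of failing. A hedged sketch of the raw variant (the buffer's purpose is hypothetical; the limit constants are the ones named in the kernel-doc above):

	void *buf;

	/* Caller initializes every byte itself, so skip the zeroing. */
	buf = memblock_virt_alloc_try_nid_raw(SZ_2M, PAGE_SIZE,
					      0, BOOTMEM_ALLOC_ACCESSIBLE,
					      NUMA_NO_NODE);
	if (!buf)
		panic("cannot allocate early buffer\n");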
@@ -1461,11 +1556,11 @@ void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
  */
 void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
 {
-	u64 cursor, end;
+	phys_addr_t cursor, end;
 
-	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
-		     __func__, (u64)base, (u64)base + size - 1,
-		     (void *)_RET_IP_);
+	end = base + size - 1;
+	memblock_dbg("%s: [%pa-%pa] %pF\n",
+		     __func__, &base, &end, (void *)_RET_IP_);
 	kmemleak_free_part_phys(base, size);
 	cursor = PFN_UP(base);
 	end = PFN_DOWN(base + size);
@@ -1664,9 +1759,9 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
  * @base: base of region to check
  * @size: size of region to check
  *
- * Check if the region [@base, @base+@size) is a subset of a memory block.
+ * Check if the region [@base, @base + @size) is a subset of a memory block.
  *
- * RETURNS:
+ * Return:
  * 0 if false, non-zero if true
  */
 bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
@@ -1685,9 +1780,10 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
  * @base: base of region to check
  * @size: size of region to check
  *
- * Check if the region [@base, @base+@size) intersects a reserved memory block.
+ * Check if the region [@base, @base + @size) intersects a reserved
+ * memory block.
  *
- * RETURNS:
+ * Return:
  * True if they intersect, false if not.
  */
 bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
@@ -1734,7 +1830,7 @@ phys_addr_t __init_memblock memblock_get_current_limit(void)
 static void __init_memblock memblock_dump(struct memblock_type *type)
 {
 	phys_addr_t base, end, size;
-	unsigned long flags;
+	enum memblock_flags flags;
 	int idx;
 	struct memblock_region *rgn;
 
@@ -1752,7 +1848,7 @@ static void __init_memblock memblock_dump(struct memblock_type *type)
 		snprintf(nid_buf, sizeof(nid_buf), " on node %d",
 			 memblock_get_region_node(rgn));
 #endif
-		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
+		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#x\n",
 			type->name, idx, &base, &end, &size, nid_buf, flags);
 	}
 }
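Finally, the two predicates at the end of the diff combine naturally when validating a candidate range before claiming it; a short sketch with illustrative values:

	phys_addr_t base = 0x90000000, size = SZ_4M;

	/* Usable only if entirely RAM and not already reserved. */
	if (memblock_is_region_memory(base, size) &&
	    !memblock_is_region_reserved(base, size))
		memblock_reserve(base, size);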