Diffstat (limited to 'mm/memblock.c')
 -rw-r--r--  mm/memblock.c | 124
 1 file changed, 111 insertions, 13 deletions
diff --git a/mm/memblock.c b/mm/memblock.c
index 0ac412a0a7ee..53e477bb5558 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -20,6 +20,8 @@
 #include <linux/seq_file.h>
 #include <linux/memblock.h>
 
+#include <asm-generic/sections.h>
+
 static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
 
@@ -32,6 +34,7 @@ struct memblock memblock __initdata_memblock = {
 	.reserved.cnt		= 1,	/* empty dummy entry */
 	.reserved.max		= INIT_MEMBLOCK_REGIONS,
 
+	.bottom_up		= false,
 	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
 };
 
@@ -82,6 +85,73 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
 	return (i < type->cnt) ? i : -1;
 }
 
+/**
+ * __memblock_find_range_bottom_up - find free area utility in bottom-up
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Utility called from memblock_find_in_range_node(), finds free area bottom-up.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+static phys_addr_t __init_memblock
+__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
+				phys_addr_t size, phys_addr_t align, int nid)
+{
+	phys_addr_t this_start, this_end, cand;
+	u64 i;
+
+	for_each_free_mem_range(i, nid, &this_start, &this_end, NULL) {
+		this_start = clamp(this_start, start, end);
+		this_end = clamp(this_end, start, end);
+
+		cand = round_up(this_start, align);
+		if (cand < this_end && this_end - cand >= size)
+			return cand;
+	}
+
+	return 0;
+}
+
+/**
+ * __memblock_find_range_top_down - find free area utility in top-down
+ * @start: start of candidate range
+ * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
+ * @size: size of free area to find
+ * @align: alignment of free area to find
+ * @nid: nid of the free area to find, %MAX_NUMNODES for any node
+ *
+ * Utility called from memblock_find_in_range_node(), finds free area top-down.
+ *
+ * RETURNS:
+ * Found address on success, 0 on failure.
+ */
+static phys_addr_t __init_memblock
+__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
+			       phys_addr_t size, phys_addr_t align, int nid)
+{
+	phys_addr_t this_start, this_end, cand;
+	u64 i;
+
+	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
+		this_start = clamp(this_start, start, end);
+		this_end = clamp(this_end, start, end);
+
+		if (this_end < size)
+			continue;
+
+		cand = round_down(this_end - size, align);
+		if (cand >= this_start)
+			return cand;
+	}
+
+	return 0;
+}
+
 /**
  * memblock_find_in_range_node - find free area in given range and node
  * @start: start of candidate range
@@ -92,15 +162,23 @@ static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
  *
  * Find @size free area aligned to @align in the specified range and node.
  *
+ * When the allocation direction is bottom-up, @start should be greater
+ * than the end of the kernel image; otherwise it will be trimmed. The
+ * reason is that we want bottom-up allocations to land just above the
+ * kernel image, so it is highly likely that the allocated memory and
+ * the kernel will reside in the same node.
+ *
+ * If bottom-up allocation fails, top-down allocation will be tried.
+ *
  * RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
  */
 phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
 					phys_addr_t align, int nid)
 {
-	phys_addr_t this_start, this_end, cand;
-	u64 i;
+	phys_addr_t ret;
+	phys_addr_t kernel_end;
 
 	/* pump up @end */
 	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
@@ -109,19 +187,39 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
 	/* avoid allocating the first page */
 	start = max_t(phys_addr_t, start, PAGE_SIZE);
 	end = max(start, end);
+	kernel_end = __pa_symbol(_end);
 
-	for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
-		this_start = clamp(this_start, start, end);
-		this_end = clamp(this_end, start, end);
+	/*
+	 * try bottom-up allocation only when bottom-up mode
+	 * is set and @end is above the kernel image.
+	 */
+	if (memblock_bottom_up() && end > kernel_end) {
+		phys_addr_t bottom_up_start;
 
-		if (this_end < size)
-			continue;
+		/* make sure we will allocate above the kernel */
+		bottom_up_start = max(start, kernel_end);
 
-		cand = round_down(this_end - size, align);
-		if (cand >= this_start)
-			return cand;
+		/* ok, try bottom-up allocation first */
+		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
+						      size, align, nid);
+		if (ret)
+			return ret;
+
+		/*
+		 * we always limit bottom-up allocation above the kernel,
+		 * but top-down allocation doesn't have the limit, so
+		 * retrying top-down allocation may succeed when bottom-up
+		 * allocation failed.
+		 *
+		 * bottom-up allocation is expected to fail very rarely,
+		 * so we use WARN_ONCE() here to see the stack trace if
+		 * failure happens.
+		 */
+		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
+			     "memory hotunplug may be affected\n");
 	}
-	return 0;
+
+	return __memblock_find_range_top_down(start, end, size, align, nid);
 }
 
 /**
@@ -134,7 +232,7 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
  * Find @size free area aligned to @align in the specified range.
  *
  * RETURNS:
- * Found address on success, %0 on failure.
+ * Found address on success, 0 on failure.
  */
 phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					phys_addr_t end, phys_addr_t size,
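The placement difference between the two helpers above is easiest to see in isolation. The following is a small self-contained sketch in plain userspace C, not kernel code: the free-range table and every address in it are invented, and clamp()/round_up()/round_down() are reimplemented locally because the kernel helpers are not available outside the tree. It mirrors the candidate search of __memblock_find_range_bottom_up() and __memblock_find_range_top_down() step for step.

/* Toy model of memblock's two scan directions (userspace sketch). */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

typedef uint64_t phys_addr_t;

/* Local stand-ins for the kernel helpers; align must be a power of two,
 * as memblock requires. */
static phys_addr_t clamp(phys_addr_t v, phys_addr_t lo, phys_addr_t hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}
static phys_addr_t round_up(phys_addr_t x, phys_addr_t a)   { return (x + a - 1) & ~(a - 1); }
static phys_addr_t round_down(phys_addr_t x, phys_addr_t a) { return x & ~(a - 1); }

struct range { phys_addr_t start, end; };	/* [start, end) free region */

/* Bottom-up: walk free ranges low->high, take the lowest aligned slot */
static phys_addr_t find_bottom_up(const struct range *r, size_t n,
				  phys_addr_t start, phys_addr_t end,
				  phys_addr_t size, phys_addr_t align)
{
	for (size_t i = 0; i < n; i++) {
		phys_addr_t this_start = clamp(r[i].start, start, end);
		phys_addr_t this_end   = clamp(r[i].end, start, end);
		phys_addr_t cand       = round_up(this_start, align);

		if (cand < this_end && this_end - cand >= size)
			return cand;
	}
	return 0;	/* like memblock: 0 means failure */
}

/* Top-down: walk free ranges high->low, take the highest aligned slot */
static phys_addr_t find_top_down(const struct range *r, size_t n,
				 phys_addr_t start, phys_addr_t end,
				 phys_addr_t size, phys_addr_t align)
{
	for (size_t i = n; i-- > 0; ) {
		phys_addr_t this_start = clamp(r[i].start, start, end);
		phys_addr_t this_end   = clamp(r[i].end, start, end);

		if (this_end < size)
			continue;

		phys_addr_t cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}
	return 0;
}

int main(void)
{
	/* Two free ranges; pretend the kernel image ends at 16 MiB */
	const struct range free_ranges[] = {
		{ 0x0100000, 0x4000000 },	/*   1 MiB ..  64 MiB */
		{ 0x8000000, 0x9000000 },	/* 128 MiB .. 144 MiB */
	};
	phys_addr_t kernel_end = 0x1000000;
	phys_addr_t size = 0x200000, align = 0x100000;	/* 2 MiB, 1 MiB aligned */

	/* Bottom-up is constrained to start above the kernel, as in
	 * memblock_find_in_range_node(); top-down scans the whole range. */
	printf("bottom-up: %#llx\n", (unsigned long long)
	       find_bottom_up(free_ranges, 2, kernel_end, 0x9000000, size, align));
	printf("top-down : %#llx\n", (unsigned long long)
	       find_top_down(free_ranges, 2, 0x100000, 0x9000000, size, align));
	return 0;
}

Run against this toy table, the bottom-up scan yields 0x1000000, the first aligned slot above the image, while the top-down scan yields 0x8e00000, the highest aligned slot that still fits; that is exactly the placement difference the patch introduces.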

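For completeness: the .bottom_up flag that memblock_bottom_up() reads above stays false unless something sets it. Below is a minimal sketch of how an early-boot caller would opt in, assuming the memblock_set_bottom_up() accessor added to include/linux/memblock.h by the companion header change (not shown in this diff); the enclosing function is hypothetical, invented for illustration.

/* Hypothetical early-boot hook: only memblock_set_bottom_up() and the
 * behaviour it toggles come from this patch series; the function name
 * and call site are invented for illustration.
 */
#include <linux/init.h>
#include <linux/memblock.h>

static void __init enable_bottom_up_alloc(void)
{
	/*
	 * From here on, memblock_find_in_range_node() first scans free
	 * memory upward from __pa_symbol(_end), so early allocations
	 * cluster just above the kernel image (and thus, very likely,
	 * on the kernel's node), falling back to the classic top-down
	 * scan, with a one-time warning, only if that search fails.
	 */
	memblock_set_bottom_up(true);
}

Keeping boot-time allocations near the kernel matters because memblock memory is never given back: parking it next to the image leaves the higher ranges untouched, which is why the fallback path warns that memory hotunplug may be affected.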