Diffstat (limited to 'mm/memblock.c')
 -rw-r--r--	mm/memblock.c	| 84
 1 file changed, 40 insertions(+), 44 deletions(-)
diff --git a/mm/memblock.c b/mm/memblock.c
index d4382095f8bd..4d9393c7edc9 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -143,30 +143,6 @@ phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
 					   MAX_NUMNODES);
 }
 
-/*
- * Free memblock.reserved.regions
- */
-int __init_memblock memblock_free_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_free(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
-/*
- * Reserve memblock.reserved.regions
- */
-int __init_memblock memblock_reserve_reserved_regions(void)
-{
-	if (memblock.reserved.regions == memblock_reserved_init_regions)
-		return 0;
-
-	return memblock_reserve(__pa(memblock.reserved.regions),
-		 sizeof(struct memblock_region) * memblock.reserved.max);
-}
-
 static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
 {
 	type->total_size -= type->regions[r].size;
@@ -184,6 +160,18 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
 	}
 }
 
+phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
+					phys_addr_t *addr)
+{
+	if (memblock.reserved.regions == memblock_reserved_init_regions)
+		return 0;
+
+	*addr = __pa(memblock.reserved.regions);
+
+	return PAGE_ALIGN(sizeof(struct memblock_region) *
+			  memblock.reserved.max);
+}
+
 /**
  * memblock_double_array - double the size of the memblock regions array
  * @type: memblock type of the regions array being doubled
@@ -204,6 +192,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 						phys_addr_t new_area_size)
 {
 	struct memblock_region *new_array, *old_array;
+	phys_addr_t old_alloc_size, new_alloc_size;
 	phys_addr_t old_size, new_size, addr;
 	int use_slab = slab_is_available();
 	int *in_slab;
@@ -217,6 +206,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	/* Calculate new doubled size */
 	old_size = type->max * sizeof(struct memblock_region);
 	new_size = old_size << 1;
+	/*
+	 * We need to allocated new one align to PAGE_SIZE,
+	 *   so we can free them completely later.
+	 */
+	old_alloc_size = PAGE_ALIGN(old_size);
+	new_alloc_size = PAGE_ALIGN(new_size);
 
 	/* Retrieve the slab flag */
 	if (type == &memblock.memory)
@@ -227,13 +222,13 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	/* Try to find some space for it.
 	 *
 	 * WARNING: We assume that either slab_is_available() and we use it or
-	 * we use MEMBLOCK for allocations. That means that this is unsafe to use
-	 * when bootmem is currently active (unless bootmem itself is implemented
-	 * on top of MEMBLOCK which isn't the case yet)
+	 * we use MEMBLOCK for allocations. That means that this is unsafe to
+	 * use when bootmem is currently active (unless bootmem itself is
+	 * implemented on top of MEMBLOCK which isn't the case yet)
 	 *
 	 * This should however not be an issue for now, as we currently only
-	 * call into MEMBLOCK while it's still active, or much later when slab is
-	 * active for memory hotplug operations
+	 * call into MEMBLOCK while it's still active, or much later when slab
+	 * is active for memory hotplug operations
 	 */
 	if (use_slab) {
 		new_array = kmalloc(new_size, GFP_KERNEL);
@@ -245,11 +240,11 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 
 		addr = memblock_find_in_range(new_area_start + new_area_size,
 						memblock.current_limit,
-						new_size, sizeof(phys_addr_t));
+						new_alloc_size, PAGE_SIZE);
 		if (!addr && new_area_size)
 			addr = memblock_find_in_range(0,
-					min(new_area_start, memblock.current_limit),
-					new_size, sizeof(phys_addr_t));
+				min(new_area_start, memblock.current_limit),
+				new_alloc_size, PAGE_SIZE);
 
 		new_array = addr ? __va(addr) : 0;
 	}
@@ -259,12 +254,14 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 		return -1;
 	}
 
-	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
-		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
+	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
+			memblock_type_name(type), type->max * 2, (u64)addr,
+			(u64)addr + new_size - 1);
 
-	/* Found space, we now need to move the array over before
-	 * we add the reserved region since it may be our reserved
-	 * array itself that is full.
+	/*
+	 * Found space, we now need to move the array over before we add the
+	 * reserved region since it may be our reserved array itself that is
+	 * full.
 	 */
 	memcpy(new_array, type->regions, old_size);
 	memset(new_array + type->max, 0, old_size);
@@ -272,20 +269,19 @@ static int __init_memblock memblock_double_array(struct memblock_type *type,
 	type->regions = new_array;
 	type->max <<= 1;
 
-	/* Free old array. We needn't free it if the array is the
-	 * static one
-	 */
+	/* Free old array. We needn't free it if the array is the static one */
 	if (*in_slab)
 		kfree(old_array);
 	else if (old_array != memblock_memory_init_regions &&
 		 old_array != memblock_reserved_init_regions)
-		memblock_free(__pa(old_array), old_alloc_size);
+		memblock_free(__pa(old_array), old_alloc_size);
 
-	/* Reserve the new array if that comes from the memblock.
-	 * Otherwise, we needn't do it
-	 */
+	/*
+	 * Reserve the new array if that comes from the memblock.  Otherwise, we
+	 * needn't do it
+	 */
 	if (!use_slab)
-		BUG_ON(memblock_reserve(addr, new_size));
+		BUG_ON(memblock_reserve(addr, new_alloc_size));
 
 	/* Update slab flag */
 	*in_slab = use_slab;
