Diffstat (limited to 'arch/arm')
 arch/arm/kernel/setup.c | 18
 arch/arm/mm/mmu.c       | 39
 arch/arm/mm/pmsa-v7.c   | 23
 arch/arm/mm/pmsa-v8.c   | 17
 arch/arm/xen/mm.c       |  7
 5 files changed, 48 insertions(+), 56 deletions(-)
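
Across all five files the patch swaps open-coded for_each_memblock(memory, reg) walks for the for_each_mem_range() iterator, which hands back physical [start, end) addresses instead of a struct memblock_region pointer. For reference only (this sketch is not part of the patch), the two styles look like this:

/* Sketch of the two iteration styles this patch converts between.
 * Illustration only; not code from the kernel tree. */
struct memblock_region *reg;
phys_addr_t start, end;
u64 i;

/* Old style: walk the region structs and unpack base/size by hand. */
for_each_memblock(memory, reg) {
	start = reg->base;
	end = reg->base + reg->size;	/* exclusive end */
	/* ... use start/end ... */
}

/* New style: the iterator yields the half-open range directly. */
for_each_mem_range(i, &start, &end) {
	/* ... use start/end ... */
}
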
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index d8e18cdd96d3..3f65d0ac9f63 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -843,20 +843,26 @@ early_param("mem", early_mem);
static void __init request_standard_resources(const struct machine_desc *mdesc)
{
- struct memblock_region *region;
+ phys_addr_t start, end, res_end;
struct resource *res;
+ u64 i;
kernel_code.start = virt_to_phys(_text);
kernel_code.end = virt_to_phys(__init_begin - 1);
kernel_data.start = virt_to_phys(_sdata);
kernel_data.end = virt_to_phys(_end - 1);
- for_each_memblock(memory, region) {
- phys_addr_t start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
- phys_addr_t end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ for_each_mem_range(i, &start, &end) {
unsigned long boot_alias_start;
/*
+ * In memblock, end points to the first byte after the
+ * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res_end = end - 1;
+
+ /*
* Some systems have a special memory alias which is only
* used for booting. We need to advertise this region to
* kexec-tools so they know where bootable RAM is located.
@@ -869,7 +875,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
__func__, sizeof(*res));
res->name = "System RAM (boot alias)";
res->start = boot_alias_start;
- res->end = phys_to_idmap(end);
+ res->end = phys_to_idmap(res_end);
res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
}
@@ -880,7 +886,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
sizeof(*res));
res->name = "System RAM";
res->start = start;
- res->end = end;
+ res->end = res_end;
res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
request_resource(&iomem_resource, res);
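
The one behavioural detail in the setup.c hunks is the end convention: memblock hands out half-open [start, end) ranges, while struct resource uses an inclusive end, hence res_end = end - 1. A small illustration with invented addresses (not from the patch):

/* Illustration only (addresses are made up): a memblock range of
 * [0x80000000, 0xa0000000) becomes a resource ending at 0x9fffffff. */
phys_addr_t start = 0x80000000;		/* first byte of the bank */
phys_addr_t end   = 0xa0000000;		/* one past the last byte */

struct resource res = {
	.name	= "System RAM",
	.start	= start,		/* inclusive */
	.end	= end - 1,		/* inclusive: 0x9fffffff */
	.flags	= IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
};
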
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c36f977b2ccb..698cc740c6b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1154,9 +1154,8 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
void __init adjust_lowmem_bounds(void)
{
- phys_addr_t memblock_limit = 0;
- u64 vmalloc_limit;
- struct memblock_region *reg;
+ phys_addr_t block_start, block_end, memblock_limit = 0;
+ u64 vmalloc_limit, i;
phys_addr_t lowmem_limit = 0;
/*
@@ -1172,26 +1171,18 @@ void __init adjust_lowmem_bounds(void)
* The first usable region must be PMD aligned. Mark its start
* as MEMBLOCK_NOMAP if it isn't
*/
- for_each_memblock(memory, reg) {
- if (!memblock_is_nomap(reg)) {
- if (!IS_ALIGNED(reg->base, PMD_SIZE)) {
- phys_addr_t len;
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (!IS_ALIGNED(block_start, PMD_SIZE)) {
+ phys_addr_t len;
- len = round_up(reg->base, PMD_SIZE) - reg->base;
- memblock_mark_nomap(reg->base, len);
- }
- break;
+ len = round_up(block_start, PMD_SIZE) - block_start;
+ memblock_mark_nomap(block_start, len);
}
+ break;
}
- for_each_memblock(memory, reg) {
- phys_addr_t block_start = reg->base;
- phys_addr_t block_end = reg->base + reg->size;
-
- if (memblock_is_nomap(reg))
- continue;
-
- if (reg->base < vmalloc_limit) {
+ for_each_mem_range(i, &block_start, &block_end) {
+ if (block_start < vmalloc_limit) {
if (block_end > lowmem_limit)
/*
* Compare as u64 to ensure vmalloc_limit does
@@ -1440,19 +1431,15 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
- struct memblock_region *reg;
phys_addr_t kernel_x_start = round_down(__pa(KERNEL_START), SECTION_SIZE);
phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t start, end;
+ u64 i;
/* Map all the lowmem memory banks. */
- for_each_memblock(memory, reg) {
- phys_addr_t start = reg->base;
- phys_addr_t end = start + reg->size;
+ for_each_mem_range(i, &start, &end) {
struct map_desc map;
- if (memblock_is_nomap(reg))
- continue;
-
if (end > arm_lowmem_limit)
end = arm_lowmem_limit;
if (start >= end)
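
The memblock_is_nomap() checks dropped in adjust_lowmem_bounds() and map_lowmem() are not lost: for_each_mem_range(), unlike the old for_each_memblock(), skips MEMBLOCK_NOMAP regions itself. Roughly, the two loops below visit the same ranges (sketch only; use_range() is a made-up placeholder):

struct memblock_region *reg;
phys_addr_t start, end;
u64 i;

/* Old loop: callers had to filter no-map regions themselves. */
for_each_memblock(memory, reg) {
	if (memblock_is_nomap(reg))
		continue;
	use_range(reg->base, reg->base + reg->size);
}

/* New loop: MEMBLOCK_NOMAP regions are already filtered out. */
for_each_mem_range(i, &start, &end)
	use_range(start, end);
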
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 699fa2e88725..88950e41a3a9 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -231,12 +231,12 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
void __init pmsav7_adjust_lowmem_bounds(void)
{
phys_addr_t specified_mem_size = 0, total_mem_size = 0;
- struct memblock_region *reg;
- bool first = true;
phys_addr_t mem_start;
phys_addr_t mem_end;
+ phys_addr_t reg_start, reg_end;
unsigned int mem_max_regions;
- int num, i;
+ int num;
+ u64 i;
/* Free-up PMSAv7_PROBE_REGION */
mpu_min_region_order = __mpu_min_region_order();
@@ -262,20 +262,19 @@ void __init pmsav7_adjust_lowmem_bounds(void)
mem_max_regions -= num;
#endif
- for_each_memblock(memory, reg) {
- if (first) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ if (i == 0) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
- if (reg->base != phys_offset)
+ if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
- mem_start = reg->base;
- mem_end = reg->base + reg->size;
- specified_mem_size = reg->size;
- first = false;
+ mem_start = reg_start;
+ mem_end = reg_end;
+ specified_mem_size = mem_end - mem_start;
} else {
/*
* memblock auto merges contiguous blocks, remove
@@ -283,8 +282,8 @@ void __init pmsav7_adjust_lowmem_bounds(void)
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
- &mem_end, &reg->base);
- memblock_remove(reg->base, 0 - reg->base);
+ &mem_end, &reg_start);
+ memblock_remove(reg_start, 0 - reg_start);
break;
}
}
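
The memblock_remove(reg_start, 0 - reg_start) call kept in both PMSA variants relies on unsigned wrap-around: with phys_addr_t arithmetic, 0 - reg_start is the number of bytes from reg_start to the top of the physical address space, so everything above the first bank is dropped. A worked example with an invented address, assuming a 32-bit phys_addr_t:

/* Sketch only; reg_start is made up, 32-bit phys_addr_t assumed. */
phys_addr_t reg_start = 0xc0000000;
phys_addr_t len = 0 - reg_start;	/* wraps to 0x40000000 */

/* Removes [0xc0000000, end of physical address space). */
memblock_remove(reg_start, len);
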
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 0d7d5fb59247..2de019f7503e 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -94,20 +94,19 @@ static __init bool is_region_fixed(int number)
void __init pmsav8_adjust_lowmem_bounds(void)
{
phys_addr_t mem_end;
- struct memblock_region *reg;
- bool first = true;
+ phys_addr_t reg_start, reg_end;
+ u64 i;
- for_each_memblock(memory, reg) {
- if (first) {
+ for_each_mem_range(i, &reg_start, &reg_end) {
+ if (i == 0) {
phys_addr_t phys_offset = PHYS_OFFSET;
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
- if (reg->base != phys_offset)
+ if (reg_start != phys_offset)
panic("First memory bank must be contiguous from PHYS_OFFSET");
- mem_end = reg->base + reg->size;
- first = false;
+ mem_end = reg_end;
} else {
/*
* memblock auto merges contiguous blocks, remove
@@ -115,8 +114,8 @@ void __init pmsav8_adjust_lowmem_bounds(void)
* blocks separately while iterating)
*/
pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
- &mem_end, &reg->base);
- memblock_remove(reg->base, 0 - reg->base);
+ &mem_end, &reg_start);
+ memblock_remove(reg_start, 0 - reg_start);
break;
}
}
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 396797ffe2b1..d3ef975a0965 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -25,11 +25,12 @@
unsigned long xen_get_swiotlb_free_pages(unsigned int order)
{
- struct memblock_region *reg;
+ phys_addr_t base;
gfp_t flags = __GFP_NOWARN|__GFP_KSWAPD_RECLAIM;
+ u64 i;
- for_each_memblock(memory, reg) {
- if (reg->base < (phys_addr_t)0xffffffff) {
+ for_each_mem_range(i, &base, NULL) {
+ if (base < (phys_addr_t)0xffffffff) {
if (IS_ENABLED(CONFIG_ZONE_DMA32))
flags |= __GFP_DMA32;
else