author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-07-07 02:39:01 +0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2010-08-05 06:56:07 +0400
commit     e63075a3c9377536d085bc013cd3fe6323162449 (patch)
tree       28fde124dde6df867947882fc686d228502846df /arch/powerpc/mm
parent     27f574c223d2c09610058b3ec7a29582d63a3e06 (diff)
download   linux-e63075a3c9377536d085bc013cd3fe6323162449.tar.xz
memblock: Introduce default allocation limit and use it to replace explicit ones
This introduces memblock.current_limit, which is used to limit allocations
made via memblock_alloc() or memblock_alloc_base(..., MEMBLOCK_ALLOC_ACCESSIBLE).
The old MEMBLOCK_ALLOC_ANYWHERE changes value from 0 to ~(u64)0 and can still
be used with memblock_alloc_base() to allocate really anywhere.
Such allocations are -no-longer- cropped to MEMBLOCK_REAL_LIMIT, which disappears.
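
For illustration only (not part of this patch): a minimal sketch of how the two
allocators behave under the new scheme. The wrapper function is hypothetical and
the PAGE_SIZE size/alignment is just an example.

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/page.h>

/* Hypothetical helper, for illustration only. */
static void __init memblock_limit_sketch(void)
{
	phys_addr_t capped, anywhere;

	/* Both of these are capped at memblock.current_limit ... */
	capped = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	capped = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				     MEMBLOCK_ALLOC_ACCESSIBLE);

	/* ... while an explicit max address, including the new ~(u64)0
	 * MEMBLOCK_ALLOC_ANYWHERE, is used as given and may return memory
	 * above the current limit. */
	anywhere = memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
				       MEMBLOCK_ALLOC_ANYWHERE);
}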
Note to archs: I'm leaving the default limit at MEMBLOCK_ALLOC_ANYWHERE. I
strongly recommend that you set an appropriate limit during boot in order
to guarantee that a memblock_alloc() at any time results in something that
is accessible with a simple __va().
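
A minimal boot-time sketch of that recommendation (the helper name and its
linear_top argument are made up for illustration; memblock_set_current_limit()
and memblock_alloc() are the interfaces this patch relies on):

#include <linux/init.h>
#include <linux/memblock.h>
#include <asm/page.h>

/* Hypothetical arch hook: linear_top is whatever physical address the
 * boot-time linear mapping actually covers on the platform. */
static void __init *early_alloc_sketch(phys_addr_t linear_top)
{
	/* Cap the default memblock pool at what __va() can reach right now. */
	memblock_set_current_limit(linear_top);

	/* Anything memblock_alloc() returns below that limit is safe to
	 * access through a plain __va(). */
	return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
}

Setting the limit once at boot means later callers no longer have to pass an
explicit max address on every allocation.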
The reason is that a subsequent patch will introduce the ability for
the array to resize itself by reallocating itself. The MEMBLOCK core will
honor the current limit when performing those allocations.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/40x_mmu.c          5
-rw-r--r--   arch/powerpc/mm/fsl_booke_mmu.c    3
-rw-r--r--   arch/powerpc/mm/hash_utils_64.c    3
-rw-r--r--   arch/powerpc/mm/init_32.c         29
-rw-r--r--   arch/powerpc/mm/ppc_mmu_32.c       3
-rw-r--r--   arch/powerpc/mm/tlb_nohash.c       2
6 files changed, 17 insertions, 28 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 1dc2fa5ce1bd..58969b51f454 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -35,6 +35,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -47,6 +48,7 @@
 #include <asm/bootx.h>
 #include <asm/machdep.h>
 #include <asm/setup.h>
+
 #include "mmu_decl.h"
 
 extern int __map_without_ltlbs;
@@ -139,8 +141,7 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
	 * coverage with normal-sized pages (or other reasons) do not
	 * attempt to allocate outside the allowed range.
	 */
-
-	__initial_memory_limit_addr = memstart_addr + mapped;
+	memblock_set_current_limit(memstart_addr + mapped);
 
	return mapped;
 }
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index cdc7526e9c93..e525f862d759 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -40,6 +40,7 @@
 #include <linux/init.h>
 #include <linux/delay.h>
 #include <linux/highmem.h>
+#include <linux/memblock.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -212,5 +213,5 @@ void __init adjust_total_lowmem(void)
	pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20,
	        (unsigned int)((total_lowmem - __max_low_memory) >> 20));
 
-	__initial_memory_limit_addr = memstart_addr + __max_low_memory;
+	memblock_set_current_limit(memstart_addr + __max_low_memory);
 }
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a542ff5ec8a9..b05890e23813 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -696,7 +696,8 @@ static void __init htab_initialize(void)
 #endif /* CONFIG_U3_DART */
		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
				prot, mmu_linear_psize, mmu_kernel_ssize));
-	}
+	}
+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
 
	/*
	 * If we have a memory_limit and we've allocated TCEs then we need to
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 6a6975dc2654..59b208b7ec6f 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -92,12 +92,6 @@ int __allow_ioremap_reserved;
 unsigned long __max_low_memory = MAX_LOW_MEM;
 
 /*
- * address of the limit of what is accessible with initial MMU setup -
- * 256MB usually, but only 16MB on 601.
- */
-phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000;
-
-/*
  * Check for command-line options that affect what MMU_init will do.
  */
 void MMU_setup(void)
@@ -126,13 +120,6 @@ void __init MMU_init(void)
	if (ppc_md.progress)
		ppc_md.progress("MMU:enter", 0x111);
 
-	/* 601 can only access 16MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 1)
-		__initial_memory_limit_addr = 0x01000000;
-	/* 8xx can only access 8MB at the moment */
-	if (PVR_VER(mfspr(SPRN_PVR)) == 0x50)
-		__initial_memory_limit_addr = 0x00800000;
-
	/* parse args from command line */
	MMU_setup();
 
@@ -190,20 +177,18 @@ void __init MMU_init(void)
 #ifdef CONFIG_BOOTX_TEXT
	btext_unmap();
 #endif
+
+	/* Shortly after that, the entire linear mapping will be available */
+	memblock_set_current_limit(lowmem_end_addr);
 }
 
 /* This is only called until mem_init is done. */
 void __init *early_get_page(void)
 {
-	void *p;
-
-	if (init_bootmem_done) {
-		p = alloc_bootmem_pages(PAGE_SIZE);
-	} else {
-		p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE,
-				__initial_memory_limit_addr));
-	}
-	return p;
+	if (init_bootmem_done)
+		return alloc_bootmem_pages(PAGE_SIZE);
+	else
+		return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
 }
 
 /* Free up now-unused memory */
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index f8a01829d64f..7d34e170e80f 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -223,8 +223,7 @@ void __init MMU_init_hw(void)
	 * Find some memory for the hash table.
	 */
	if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
-	Hash = __va(memblock_alloc_base(Hash_size, Hash_size,
-				__initial_memory_limit_addr));
+	Hash = __va(memblock_alloc(Hash_size, Hash_size));
	cacheable_memzero(Hash, Hash_size);
	_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
 
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index d8695b02a968..7ba32e762990 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -432,6 +432,8 @@ static void __early_init_mmu(int boot_cpu)
	 * the MMU configuration
	 */
	mb();
+
+	memblock_set_current_limit(linear_map_top);
 }
 
 void __init early_init_mmu(void)