| author | Tejun Heo <tj@kernel.org> | 2009-09-24 13:18:55 +0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-09-29 04:17:57 +0400 |
| commit | a70c691376c7c7f94af41395848066f59501fffd (patch) | |
| tree | 8fec04ca394fb1023020c85977d149c83e77e295 /arch/sparc | |
| parent | fb59e72e7e10fd9d31f4e522f1b28254c2cc8a6c (diff) | |
| download | linux-a70c691376c7c7f94af41395848066f59501fffd.tar.xz | |

sparc64: implement page mapping percpu first chunk allocator
Implement the page mapping percpu first chunk allocator as a fallback
to the embedding allocator. The next patch will make the embedding
allocator check distances between units to determine whether the first
chunk fits within the vmalloc area, so that this fallback can be used
in such cases.
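
For context, the distance check that the next patch adds to the embed
allocator works roughly as sketched below. This is illustrative only,
not the actual patch: the allocation-info structure `ai`, the `areas`
and `base` variables, `rc`, the `out_free` label and the 3/4 threshold
are stand-ins for pcpu_embed_first_chunk() internals.

```c
/*
 * Illustrative sketch: if the NUMA groups' per-cpu units end up spread
 * further apart than the vmalloc area can cover, fail the embed
 * allocator so the caller can fall back to the page allocator.
 */
size_t max_distance = 0;
int group;

for (group = 0; group < ai->nr_groups; group++) {
        ai->groups[group].base_offset = areas[group] - base;
        max_distance = max_t(size_t, max_distance,
                             ai->groups[group].base_offset);
}
max_distance += ai->unit_size;

/* refuse if the spread exceeds ~3/4 of the vmalloc area */
if (max_distance > (VMALLOC_END - VMALLOC_START) * 3 / 4) {
        pr_warning("PERCPU: max_distance=0x%zx too large for vmalloc space 0x%lx\n",
                   max_distance, VMALLOC_END - VMALLOC_START);
        rc = -EINVAL;
        goto out_free;
}
```

When such a check fails, the error return is what lets the sparc64
setup_per_cpu_areas() below fall back to pcpu_page_first_chunk().
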
sparc64 currently has a relatively small vmalloc area, which makes it
impossible to create any dynamic chunks on certain configurations and
leads to percpu allocation failures. This patch and the next should
allow those configurations to keep working until a proper solution is
found.

While at it, mark pcpu_cpu_distance() with __init.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/sparc/Kconfig | 3 |
| -rw-r--r-- | arch/sparc/kernel/smp_64.c | 53 |

2 files changed, 47 insertions, 9 deletions

    diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
    index 97fca4695e0b..ac45aab741a5 100644
    --- a/arch/sparc/Kconfig
    +++ b/arch/sparc/Kconfig
    @@ -102,6 +102,9 @@ config HAVE_SETUP_PER_CPU_AREA
     config NEED_PER_CPU_EMBED_FIRST_CHUNK
             def_bool y if SPARC64
     
    +config NEED_PER_CPU_PAGE_FIRST_CHUNK
    +        def_bool y if SPARC64
    +
     config GENERIC_HARDIRQS_NO__DO_IRQ
             bool
             def_bool y if SPARC64
    diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
    index ff68373ce6d6..aa36223497b9 100644
    --- a/arch/sparc/kernel/smp_64.c
    +++ b/arch/sparc/kernel/smp_64.c
    @@ -1420,7 +1420,7 @@ static void __init pcpu_free_bootmem(void *ptr, size_t size)
             free_bootmem(__pa(ptr), size);
     }
     
    -static int pcpu_cpu_distance(unsigned int from, unsigned int to)
    +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
     {
             if (cpu_to_node(from) == cpu_to_node(to))
                     return LOCAL_DISTANCE;
    @@ -1428,18 +1428,53 @@ static int pcpu_cpu_distance(unsigned int from, unsigned int to)
             return REMOTE_DISTANCE;
     }
     
    +static void __init pcpu_populate_pte(unsigned long addr)
    +{
    +        pgd_t *pgd = pgd_offset_k(addr);
    +        pud_t *pud;
    +        pmd_t *pmd;
    +
    +        pud = pud_offset(pgd, addr);
    +        if (pud_none(*pud)) {
    +                pmd_t *new;
    +
    +                new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
    +                pud_populate(&init_mm, pud, new);
    +        }
    +
    +        pmd = pmd_offset(pud, addr);
    +        if (!pmd_present(*pmd)) {
    +                pte_t *new;
    +
    +                new = __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE);
    +                pmd_populate_kernel(&init_mm, pmd, new);
    +        }
    +}
    +
     void __init setup_per_cpu_areas(void)
     {
             unsigned long delta;
             unsigned int cpu;
    -        int rc;
    -
    -        rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
    -                                    PERCPU_DYNAMIC_RESERVE, 4 << 20,
    -                                    pcpu_cpu_distance, pcpu_alloc_bootmem,
    -                                    pcpu_free_bootmem);
    -        if (rc)
    -                panic("failed to initialize first chunk (%d)", rc);
    +        int rc = -EINVAL;
    +
    +        if (pcpu_chosen_fc != PCPU_FC_PAGE) {
    +                rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
    +                                            PERCPU_DYNAMIC_RESERVE, 4 << 20,
    +                                            pcpu_cpu_distance,
    +                                            pcpu_alloc_bootmem,
    +                                            pcpu_free_bootmem);
    +                if (rc)
    +                        pr_warning("PERCPU: %s allocator failed (%d), "
    +                                   "falling back to page size\n",
    +                                   pcpu_fc_names[pcpu_chosen_fc], rc);
    +        }
    +        if (rc < 0)
    +                rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE,
    +                                           pcpu_alloc_bootmem,
    +                                           pcpu_free_bootmem,
    +                                           pcpu_populate_pte);
    +        if (rc < 0)
    +                panic("cannot initialize percpu area (err=%d)", rc);
     
             delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
             for_each_possible_cpu(cpu)
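
A note on the pcpu_chosen_fc test above: the chosen first chunk
allocator comes from the percpu_alloc= kernel parameter, which is
parsed in mm/percpu.c, so booting with percpu_alloc=page makes
setup_per_cpu_areas() skip the embed attempt and go straight to
pcpu_page_first_chunk(). The sketch below shows the rough shape of that
parsing; it is simplified (the CONFIG_NEED_PER_CPU_*_FIRST_CHUNK ifdefs
around each option are omitted) and should be read as illustrative
rather than as the exact mainline code.

```c
/* Simplified sketch of the percpu_alloc= handling in mm/percpu.c. */
enum pcpu_fc pcpu_chosen_fc __initdata = PCPU_FC_AUTO;

static int __init percpu_alloc_setup(char *str)
{
        if (!strcmp(str, "embed"))
                pcpu_chosen_fc = PCPU_FC_EMBED;
        else if (!strcmp(str, "page"))
                pcpu_chosen_fc = PCPU_FC_PAGE;
        else
                pr_warning("PERCPU: unknown allocator %s specified\n", str);

        return 0;
}
early_param("percpu_alloc", percpu_alloc_setup);
```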