author | Jan Beulich <jbeulich@novell.com> | 2008-08-21 17:27:22 +0400
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-22 09:51:53 +0400
commit | 8ae3a5a8dff2c92bd1087bb97c4a3bb61174303e (patch) |
tree | ccf10f1382360d6d91e694da8d8ec4c1dd675367 /arch/x86/mm |
parent | 38cc1c3df77c1bb739a4766788eb9fa49f16ffdf (diff) |
download | linux-8ae3a5a8dff2c92bd1087bb97c4a3bb61174303e.tar.xz |
x86: fix 1:1 mapping init on 64-bit (memory hotplug case)
While I don't have a hotplug-capable system at hand, I think two issues need
fixing:
- pud_phys (in kernel_physical_mapping_init()) would remain uninitialized in
  the after_bootmem case
- the locking done just around phys_pmd_{init,update}() would leave out pgd
  updates, and it was needlessly covering code portions that do allocations
  (perhaps using a more friendly gfp value in alloc_low_page() would then be
  possible); the sketch below shows the resulting pattern
Signed-off-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
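
Reduced to its essentials, the pattern the patch establishes at the pgd level looks like this. This is a condensed sketch using the identifiers from the diff below, not self-contained kernel code: the new page table is allocated and filled outside the lock, and init_mm.page_table_lock covers only the store that links the table into the live hierarchy.

```c
/*
 * Condensed sketch (not verbatim kernel code) of the locking pattern
 * this patch moves kernel_physical_mapping_init() to:
 */
pud = alloc_low_page(&pud_phys);	/* allocate and fill: no lock held */
last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
			      page_size_mask);
unmap_low_page(pud);

spin_lock(&init_mm.page_table_lock);	/* lock covers only the pgd update */
pgd_populate(&init_mm, pgd, __va(pud_phys));
spin_unlock(&init_mm.page_table_lock);
```

The same discipline is applied one level down around pud_populate() and pmd_populate_kernel(); with allocations out of the locked region, alloc_low_page() could in principle use a friendlier gfp value, as the second bullet above notes.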
Diffstat (limited to 'arch/x86/mm')
-rw-r--r-- | arch/x86/mm/init_64.c | 32 |
1 file changed, 18 insertions(+), 14 deletions(-)
```diff
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index a87ea0e4b3dc..8f487705c3e6 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -336,9 +336,12 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		}
 
 		if (pmd_val(*pmd)) {
-			if (!pmd_large(*pmd))
+			if (!pmd_large(*pmd)) {
+				spin_lock(&init_mm.page_table_lock);
 				last_map_addr = phys_pte_update(pmd, address,
-								 end);
+								end);
+				spin_unlock(&init_mm.page_table_lock);
+			}
 			/* Count entries we're using from level2_ident_pgt */
 			if (start == 0)
 				pages++;
@@ -347,8 +350,10 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_2M)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pmd,
 				pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (address & PMD_MASK) + PMD_SIZE;
 			continue;
 		}
@@ -357,7 +362,9 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
 		last_map_addr = phys_pte_init(pte, address, end);
 		unmap_low_page(pte);
 
+		spin_lock(&init_mm.page_table_lock);
 		pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 	update_page_count(PG_LEVEL_2M, pages);
 	return last_map_addr;
@@ -370,9 +377,7 @@ phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end,
 	pmd_t *pmd = pmd_offset(pud, 0);
 	unsigned long last_map_addr;
 
-	spin_lock(&init_mm.page_table_lock);
 	last_map_addr = phys_pmd_init(pmd, address, end, page_size_mask);
-	spin_unlock(&init_mm.page_table_lock);
 	__flush_tlb_all();
 	return last_map_addr;
 }
@@ -408,20 +413,21 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
 
 		if (page_size_mask & (1<<PG_LEVEL_1G)) {
 			pages++;
+			spin_lock(&init_mm.page_table_lock);
 			set_pte((pte_t *)pud,
 				pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL_LARGE));
+			spin_unlock(&init_mm.page_table_lock);
 			last_map_addr = (addr & PUD_MASK) + PUD_SIZE;
 			continue;
 		}
 
 		pmd = alloc_low_page(&pmd_phys);
-
-		spin_lock(&init_mm.page_table_lock);
 		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask);
 		unmap_low_page(pmd);
+
+		spin_lock(&init_mm.page_table_lock);
 		pud_populate(&init_mm, pud, __va(pmd_phys));
 		spin_unlock(&init_mm.page_table_lock);
-
 	}
 	__flush_tlb_all();
 	update_page_count(PG_LEVEL_1G, pages);
@@ -513,16 +519,14 @@ static unsigned long __init kernel_physical_mapping_init(unsigned long start,
 			continue;
 		}
 
-		if (after_bootmem)
-			pud = pud_offset(pgd, start & PGDIR_MASK);
-		else
-			pud = alloc_low_page(&pud_phys);
-
+		pud = alloc_low_page(&pud_phys);
 		last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
 						 page_size_mask);
 		unmap_low_page(pud);
-		pgd_populate(&init_mm, pgd_offset_k(start),
-			     __va(pud_phys));
+
+		spin_lock(&init_mm.page_table_lock);
+		pgd_populate(&init_mm, pgd, __va(pud_phys));
+		spin_unlock(&init_mm.page_table_lock);
 	}
 
 	return last_map_addr;
```