author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-23 22:06:59 +0400
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-23 22:06:59 +0400
commit | 02171b4a7c5b555d08c3321332e0c45776518276 (patch) |
tree | 63f10cdab2a8c1bd9fe5ff29319323ff59419ef8 | /arch/x86/mm/init.c
parent | 70311aaa8afb9790fb91886749cbf80e7e6cd8d0 (diff) |
parent | 20167d3421a089a1bf1bd680b150dc69c9506810 (diff) |
download | linux-02171b4a7c5b555d08c3321332e0c45776518276.tar.xz |
Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 mm changes from Ingo Molnar:
"This tree includes a micro-optimization that avoids cr3 switches
during idling; it fixes corner cases and there's also small cleanups"
Fix up trivial context conflict with the percpu_xx -> this_cpu_xx
changes.
* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86-64: Fix accounting in kernel_physical_mapping_init()
x86/tlb: Clean up and unify TLB_FLUSH_ALL definition
x86: Drop obsolete ARCH_BOOTMEM support
x86, tlb: Switch cr3 in leave_mm() only when needed
x86/mm: Fix the size calculation of mapping tables
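The cr3 micro-optimization called out in the pull request lands via the leave_mm() change in the shortlog above. As a hedged sketch (reconstructed from memory of the tip tree of that era, using the post-merge this_cpu_* accessors mentioned in the conflict note; not a verbatim quote of the commit), the idea is to make the expensive cr3 write conditional on the CPU still being tracked in the old mm's cpumask:

```c
/*
 * Sketch of "x86, tlb: Switch cr3 in leave_mm() only when needed" --
 * kernel-context code, reconstructed from memory rather than quoted
 * from the commit.
 *
 * Writing cr3 implicitly flushes the TLB, so an idle CPU that has
 * already been dropped from the mm's cpumask should not reload
 * swapper_pg_dir a second time.
 */
void leave_mm(int cpu)
{
	struct mm_struct *active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
		BUG();

	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
		/* Still lazily referencing this mm: leave it and switch
		 * to the kernel page tables exactly once. */
		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
		load_cr3(swapper_pg_dir);
	}
	/* Otherwise cr3 no longer references this mm; skip the reload. */
}
```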
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r-- | arch/x86/mm/init.c | 21 |
1 file changed, 12 insertions, 9 deletions
```diff
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 4f0cec7e4ffb..319b6f2fb8b9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -29,8 +29,14 @@ int direct_gbpages
 #endif
 ;
 
-static void __init find_early_table_space(unsigned long end, int use_pse,
-					  int use_gbpages)
+struct map_range {
+	unsigned long start;
+	unsigned long end;
+	unsigned page_size_mask;
+};
+
+static void __init find_early_table_space(struct map_range *mr, unsigned long end,
+					  int use_pse, int use_gbpages)
 {
 	unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
 	phys_addr_t base;
@@ -55,6 +61,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 #ifdef CONFIG_X86_32
 		extra += PMD_SIZE;
 #endif
+		/* The first 2/4M doesn't use large pages. */
+		extra += mr->end - mr->start;
+
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -84,12 +93,6 @@ void __init native_pagetable_reserve(u64 start, u64 end)
 	memblock_reserve(start, end - start);
 }
 
-struct map_range {
-	unsigned long start;
-	unsigned long end;
-	unsigned page_size_mask;
-};
-
 #ifdef CONFIG_X86_32
 #define NR_RANGE_MR 3
 #else /* CONFIG_X86_64 */
@@ -261,7 +264,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 	 * nodes are discovered.
 	 */
 	if (!after_bootmem)
-		find_early_table_space(end, use_pse, use_gbpages);
+		find_early_table_space(&mr[0], end, use_pse, use_gbpages);
 
 	for (i = 0; i < nr_range; i++)
 		ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
```
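To see what the size-calculation fix in this diff accounts for: with PSE enabled, most of the linear map uses 2M/4M pages, but (per the added comment) the first map_range is mapped with 4K pages, and the old code sized the PTE tables only from the unaligned tail in `extra`. Below is a toy userspace model of the arithmetic, not kernel code; the 1 GiB range and the 2 MiB head range are illustrative assumptions:

```c
/*
 * Toy model of the accounting fixed by "x86/mm: Fix the size calculation
 * of mapping tables". Plain userspace C; constants mirror x86_64 with
 * 2 MiB large pages, and the ranges are made-up examples.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PMD_SHIFT	21			/* 2 MiB large pages */
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PUD_SHIFT	30
#define PTRS_PER_PTE	512			/* 8-byte entries per 4 KiB table page */

int main(void)
{
	unsigned long end = 1UL << 30;		/* map [0, 1 GiB) */
	unsigned long mr0_start = 0;		/* first map_range: the head region */
	unsigned long mr0_end = PMD_SIZE;	/* mapped with 4K pages only */

	unsigned long puds = (end + (1UL << PUD_SHIFT) - 1) >> PUD_SHIFT;
	unsigned long pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;

	/* Old accounting: only the tail that is not 2M-aligned needs PTEs. */
	unsigned long extra = end - (end & ~(PMD_SIZE - 1));	/* 0 here */

	/* The fix: the head range cannot use large pages either. */
	extra += mr0_end - mr0_start;

	unsigned long ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long pte_pages = (ptes + PTRS_PER_PTE - 1) / PTRS_PER_PTE;

	printf("puds=%lu pmds=%lu ptes=%lu -> %lu PTE table page(s)\n",
	       puds, pmds, ptes, pte_pages);
	return 0;
}
```

For a 2 MiB head range this yields 512 PTEs, i.e. one 4 KiB page-table page that the old calculation never reserved, which is exactly the kind of shortfall the `extra += mr->end - mr->start;` hunk above closes.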