author		Ingo Molnar <mingo@kernel.org>		2015-03-04 08:35:43 +0300
committer	Ingo Molnar <mingo@kernel.org>		2015-03-04 08:35:43 +0300
commit		d2c032e3dc58137a7261a7824d3acce435db1d66 (patch)
tree		7eea1c7c6103eefe879f07472eec99b3c41eb792 /arch/x86/mm/init.c
parent		7e8e385aaf6ed5b64b5d9108081cfcdcdd021b78 (diff)
parent		13a7a6ac0a11197edcd0f756a035f472b42cdf8b (diff)
download	linux-d2c032e3dc58137a7261a7824d3acce435db1d66.tar.xz
Merge tag 'v4.0-rc2' into x86/asm, to refresh the tree
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/mm/init.c')
-rw-r--r--	arch/x86/mm/init.c	43
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 090499a363cb..52417e771af9 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -173,11 +173,11 @@ static void __init probe_page_size_mask(void)
 
 	/* Enable PSE if available */
 	if (cpu_has_pse)
-		set_in_cr4(X86_CR4_PSE);
+		cr4_set_bits_and_update_boot(X86_CR4_PSE);
 
 	/* Enable PGE if available */
 	if (cpu_has_pge) {
-		set_in_cr4(X86_CR4_PGE);
+		cr4_set_bits_and_update_boot(X86_CR4_PGE);
 		__supported_pte_mask |= _PAGE_GLOBAL;
 	} else
 		__supported_pte_mask &= ~_PAGE_GLOBAL;
@@ -239,6 +239,31 @@ static void __init_refok adjust_range_page_size_mask(struct map_range *mr,
 	}
 }
 
+static const char *page_size_string(struct map_range *mr)
+{
+	static const char str_1g[] = "1G";
+	static const char str_2m[] = "2M";
+	static const char str_4m[] = "4M";
+	static const char str_4k[] = "4k";
+
+	if (mr->page_size_mask & (1<<PG_LEVEL_1G))
+		return str_1g;
+	/*
+	 * 32-bit without PAE has a 4M large page size.
+	 * PG_LEVEL_2M is misnamed, but we can at least
+	 * print out the right size in the string.
+	 */
+	if (IS_ENABLED(CONFIG_X86_32) &&
+	    !IS_ENABLED(CONFIG_X86_PAE) &&
+	    mr->page_size_mask & (1<<PG_LEVEL_2M))
+		return str_4m;
+
+	if (mr->page_size_mask & (1<<PG_LEVEL_2M))
+		return str_2m;
+
+	return str_4k;
+}
+
 static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 				     unsigned long start,
 				     unsigned long end)
@@ -334,8 +359,7 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	for (i = 0; i < nr_range; i++)
 		printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
 				mr[i].start, mr[i].end - 1,
-			(mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
-				(mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
+			page_size_string(&mr[i]));
 
 	return nr_range;
 }
@@ -609,7 +633,7 @@ void __init init_mem_mapping(void)
  *
  *
  * On x86, access has to be given to the first megabyte of ram because that area
- * contains bios code and data regions used by X and dosemu and similar apps.
+ * contains BIOS code and data regions used by X and dosemu and similar apps.
  * Access has to be given to non-kernel-ram areas as well, these contain the PCI
  * mmio resources as well as potential bios/acpi data regions.
  */
@@ -714,6 +738,15 @@ void __init zone_sizes_init(void)
 	free_area_init_nodes(max_zone_pfns);
 }
 
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+#ifdef CONFIG_SMP
+	.active_mm = &init_mm,
+	.state = 0,
+#endif
+	.cr4 = ~0UL,	/* fail hard if we screw up cr4 shadow initialization */
+};
+EXPORT_SYMBOL_GPL(cpu_tlbstate);
+
 void update_cache_mode_entry(unsigned entry, enum page_cache_mode cache)
 {
 	/* entry 0 MUST be WB (hardwired to speed up translations) */
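
Two notes may help readers skimming the diff. First, the new page_size_string() helper replaces a nested ternary in split_mem_range()'s debug printk and adds the 4M case for 32-bit non-PAE kernels. Its selection logic can be exercised outside the kernel; below is a minimal userspace sketch, where the PG_LEVEL_* values and the struct map_range layout are simplified stand-ins rather than the kernel's definitions:

	#include <stdio.h>

	/* Stand-in page-table level numbers, for illustration only. */
	enum { PG_LEVEL_4K = 1, PG_LEVEL_2M, PG_LEVEL_1G };

	struct map_range {
		unsigned long start;
		unsigned long end;
		unsigned long page_size_mask;
	};

	static const char *page_size_string(const struct map_range *mr)
	{
		if (mr->page_size_mask & (1 << PG_LEVEL_1G))
			return "1G";
		/* The kernel version also returns "4M" for 32-bit non-PAE
		 * builds via IS_ENABLED(); that compile-time branch is
		 * omitted from this stand-alone sketch. */
		if (mr->page_size_mask & (1 << PG_LEVEL_2M))
			return "2M";
		return "4k";
	}

	int main(void)
	{
		struct map_range mr = { 0x200000, 0x400000, 1 << PG_LEVEL_2M };

		/* Mirrors the printk format used in split_mem_range(). */
		printf(" [mem %#010lx-%#010lx] page %s\n",
		       mr.start, mr.end - 1, page_size_string(&mr));
		return 0;
	}

Built with any C compiler, this prints " [mem 0x00200000-0x003fffff] page 2M", matching the KERN_DEBUG line the kernel emits for a 2M-mapped range.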
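
Second, the DEFINE_PER_CPU_SHARED_ALIGNED block gives each CPU a cpu_tlbstate carrying a shadow copy of CR4, which the cr4_set_bits_and_update_boot() calls in probe_page_size_mask() keep current. Pre-setting .cr4 to ~0UL means a forgotten shadow initialization ends in an immediate fault (the next update would try to write reserved CR4 bits) instead of a silent desync. A minimal sketch of how such a shadowed helper can look, assuming per-CPU accessors (this_cpu_read/this_cpu_write), a raw register write helper (__write_cr4), and the mmu_cr4_features boot variable; this is illustrative, not quoted from the kernel tree:

	/* Set bits in this CPU's CR4, going through the per-CPU shadow. */
	static inline void cr4_set_bits(unsigned long mask)
	{
		unsigned long cr4 = this_cpu_read(cpu_tlbstate.cr4);

		/* Skip the expensive register write when nothing changes. */
		if ((cr4 | mask) != cr4) {
			cr4 |= mask;
			this_cpu_write(cpu_tlbstate.cr4, cr4);
			/* If the shadow was never seeded from the real
			 * register it still holds ~0UL, and a write based
			 * on it sets reserved CR4 bits and faults -- the
			 * "fail hard" behavior the initializer asks for. */
			__write_cr4(cr4);
		}
	}

	/* Boot-time variant: also record the bits in mmu_cr4_features so
	 * that secondary CPUs and the resume path re-apply them. */
	static inline void cr4_set_bits_and_update_boot(unsigned long mask)
	{
		mmu_cr4_features |= mask;
		cr4_set_bits(mask);
	}

The shadow exists so that hot paths can consult the current CR4 value with a cheap per-CPU read instead of a serializing register access.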