 arch/x86/mm/cpu_entry_area.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 50 insertions(+), 16 deletions(-)
diff --git a/arch/x86/mm/cpu_entry_area.c b/arch/x86/mm/cpu_entry_area.c
index 235ff9cfaaf4..21e8b595cbb1 100644
--- a/arch/x86/mm/cpu_entry_area.c
+++ b/arch/x86/mm/cpu_entry_area.c
@@ -15,11 +15,27 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
#endif

+struct cpu_entry_area *get_cpu_entry_area(int cpu)
+{
+ unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+ BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
+
+ return (struct cpu_entry_area *) va;
+}
+EXPORT_SYMBOL(get_cpu_entry_area);
+
+void cea_set_pte(void *cea_vaddr, phys_addr_t pa, pgprot_t flags)
+{
+ unsigned long va = (unsigned long) cea_vaddr;
+
+ set_pte_vaddr(va, pfn_pte(pa >> PAGE_SHIFT, flags));
+}
+
static void __init
-set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+cea_map_percpu_pages(void *cea_vaddr, void *ptr, int pages, pgprot_t prot)
{
- for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
- __set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+ for ( ; pages; pages--, cea_vaddr += PAGE_SIZE, ptr += PAGE_SIZE)
+ cea_set_pte(cea_vaddr, per_cpu_ptr_to_phys(ptr), prot);
}
/* Setup the fixmap mappings only once per-processor */
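In place of fixmap index arithmetic, the new helpers locate a CPU's entry area directly: get_cpu_entry_area() returns CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE, and cea_set_pte()/cea_map_percpu_pages() install PTEs at offsets within that area. Below is a minimal sketch of how a caller might map one per-CPU page with these helpers; the demo_map_one_page() wrapper is hypothetical and only mirrors what the patch does for the entry stack in the next hunk.

	/*
	 * Hypothetical illustration, not part of the patch: map a single
	 * per-CPU page into @cpu's entry area, the way setup_cpu_entry_area()
	 * maps the entry stack.
	 */
	static void __init demo_map_one_page(int cpu, void *percpu_ptr, pgprot_t prot)
	{
		/* va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE */
		struct cpu_entry_area *cea = get_cpu_entry_area(cpu);

		/* Resolve the per-CPU pointer to a physical address, install a PTE. */
		cea_set_pte(&cea->entry_stack_page, per_cpu_ptr_to_phys(percpu_ptr), prot);
	}
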
@@ -47,10 +63,12 @@ static void __init setup_cpu_entry_area(int cpu)
pgprot_t tss_prot = PAGE_KERNEL;
#endif
- __set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
- set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, entry_stack_page),
- per_cpu_ptr(&entry_stack_storage, cpu), 1,
- PAGE_KERNEL);
+ cea_set_pte(&get_cpu_entry_area(cpu)->gdt, get_cpu_gdt_paddr(cpu),
+ gdt_prot);
+
+ cea_map_percpu_pages(&get_cpu_entry_area(cpu)->entry_stack_page,
+ per_cpu_ptr(&entry_stack_storage, cpu), 1,
+ PAGE_KERNEL);
/*
* The Intel SDM says (Volume 3, 7.2.1):
@@ -72,10 +90,9 @@ static void __init setup_cpu_entry_area(int cpu)
BUILD_BUG_ON((offsetof(struct tss_struct, x86_tss) ^
offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
- set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
- &per_cpu(cpu_tss_rw, cpu),
- sizeof(struct tss_struct) / PAGE_SIZE,
- tss_prot);
+ cea_map_percpu_pages(&get_cpu_entry_area(cpu)->tss,
+ &per_cpu(cpu_tss_rw, cpu),
+ sizeof(struct tss_struct) / PAGE_SIZE, tss_prot);
#ifdef CONFIG_X86_32
per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
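The BUILD_BUG_ON above encodes the page-boundary requirement spelled out in the SDM comment: XORing offsetof() and offsetofend() of x86_tss and masking with PAGE_MASK yields zero only when both offsets fall within the same page. A self-contained sketch of that bit trick with invented offsets (not the real tss_struct layout):

	/*
	 * Stand-alone illustration of the (start ^ end) & PAGE_MASK test;
	 * the offsets are made up for the example.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* Non-zero when the two offsets land in different pages. */
	static unsigned long different_pages(unsigned long a, unsigned long b)
	{
		return (a ^ b) & PAGE_MASK;
	}

	int main(void)
	{
		printf("%#lx\n", different_pages(0x000, 0x068));	/* 0: same page */
		printf("%#lx\n", different_pages(0xfc0, 0x1028));	/* 0x1000: crosses a page */
		return 0;
	}
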
@@ -85,20 +102,37 @@ static void __init setup_cpu_entry_area(int cpu)
BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
BUILD_BUG_ON(sizeof(exception_stacks) !=
sizeof(((struct cpu_entry_area *)0)->exception_stacks));
- set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
- &per_cpu(exception_stacks, cpu),
- sizeof(exception_stacks) / PAGE_SIZE,
- PAGE_KERNEL);
+ cea_map_percpu_pages(&get_cpu_entry_area(cpu)->exception_stacks,
+ &per_cpu(exception_stacks, cpu),
+ sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
- __set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
+ cea_set_pte(&get_cpu_entry_area(cpu)->entry_trampoline,
__pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
#endif
}

+static __init void setup_cpu_entry_area_ptes(void)
+{
+#ifdef CONFIG_X86_32
+ unsigned long start, end;
+
+ BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
+ BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
+
+ start = CPU_ENTRY_AREA_BASE;
+ end = start + CPU_ENTRY_AREA_MAP_SIZE;
+
+ for (; start < end; start += PMD_SIZE)
+ populate_extra_pte(start);
+#endif
+}
+
void __init setup_cpu_entry_areas(void)
{
unsigned int cpu;

+ setup_cpu_entry_area_ptes();
+
for_each_possible_cpu(cpu)
setup_cpu_entry_area(cpu);
}
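
On 32-bit, setup_cpu_entry_area_ptes() walks the whole CPU_ENTRY_AREA_MAP_SIZE range in PMD_SIZE steps so that populate_extra_pte() allocates every page-table page up front, before setup_cpu_entry_area() installs any PTEs. Stepping at PMD granularity is enough because one page-table page maps exactly PMD_SIZE bytes of virtual address space. A hedged sketch of that invariant; the helper below is illustrative and not part of the patch:

	/*
	 * Illustrative only: the pre-population loop relies on one PTE page
	 * (PTRS_PER_PTE entries of PAGE_SIZE each) covering exactly PMD_SIZE
	 * bytes, so one populate_extra_pte() call per PMD_SIZE step reaches
	 * every page-table page in the range.
	 */
	static __init void cea_pte_coverage_check(void)
	{
		BUILD_BUG_ON((unsigned long)PTRS_PER_PTE * PAGE_SIZE != PMD_SIZE);
	}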