Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/pda.h        |  5
-rw-r--r--  arch/x86/kernel/cpu/common.c      |  3
-rw-r--r--  arch/x86/kernel/head64.c          | 10
-rw-r--r--  arch/x86/kernel/head_64.S         |  5
-rw-r--r--  arch/x86/kernel/setup_percpu.c    | 16
-rw-r--r--  arch/x86/kernel/vmlinux_64.lds.S  |  4
6 files changed, 23 insertions, 20 deletions
diff --git a/arch/x86/include/asm/pda.h b/arch/x86/include/asm/pda.h
index e91558e37850..66ae1043393d 100644
--- a/arch/x86/include/asm/pda.h
+++ b/arch/x86/include/asm/pda.h
@@ -7,6 +7,7 @@
#include <linux/cache.h>
#include <linux/threads.h>
#include <asm/page.h>
+#include <asm/percpu.h>
/* Per processor datastructure. %gs points to it while the kernel runs */
struct x8664_pda {
@@ -39,10 +40,10 @@ struct x8664_pda {
unsigned irq_spurious_count;
} ____cacheline_aligned_in_smp;
-extern struct x8664_pda *_cpu_pda[NR_CPUS];
+DECLARE_PER_CPU(struct x8664_pda, __pda);
extern void pda_init(int);
-#define cpu_pda(i) (_cpu_pda[i])
+#define cpu_pda(cpu) (&per_cpu(__pda, cpu))
/*
* There is no fast way to get the base address of the PDA, all the accesses
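For readers less familiar with the percpu accessors this hunk switches to, the following is a minimal userspace sketch of the semantics only; it is not kernel code, and NR_CPUS_SIM, pda_sim_area and the *_sim names are invented for illustration. The point it shows is that cpu_pda(cpu) keeps yielding a pointer to that CPU's pda even though the _cpu_pda[] pointer array is gone.

#include <stdio.h>

#define NR_CPUS_SIM 4

struct pda_sim {
	int cpunumber;
};

/* one instance of the "percpu" variable per simulated CPU */
static struct pda_sim pda_sim_area[NR_CPUS_SIM];

/* stand-in for per_cpu(__pda, cpu): select that CPU's copy */
#define per_cpu_sim(var, cpu)	((var)[(cpu)])

/* new-style cpu_pda(): address of the per-CPU copy, no pointer array */
#define cpu_pda_sim(cpu)	(&per_cpu_sim(pda_sim_area, (cpu)))

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++)
		cpu_pda_sim(cpu)->cpunumber = cpu;

	printf("cpu 2: cpunumber=%d\n", cpu_pda_sim(2)->cpunumber);
	return 0;
}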
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 7041acdf5579..c49498d40830 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -879,9 +879,6 @@ static __init int setup_disablecpuid(char *arg)
__setup("clearcpuid=", setup_disablecpuid);
#ifdef CONFIG_X86_64
-struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index e99b661a97f4..71b6f6ec96a2 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -26,18 +26,8 @@
#include <asm/bios_ebda.h>
#include <asm/trampoline.h>
-#ifndef CONFIG_SMP
-/* boot cpu pda, referenced by head_64.S to initialize %gs on UP */
-struct x8664_pda _boot_cpu_pda;
-#endif
-
void __init x86_64_init_pda(void)
{
-#ifdef CONFIG_SMP
- cpu_pda(0) = (void *)__per_cpu_load;
-#else
- cpu_pda(0) = &_boot_cpu_pda;
-#endif
pda_init(0);
}
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7a995d0e9f78..c8ace880661b 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -19,6 +19,7 @@
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
+#include <asm/percpu.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -250,7 +251,7 @@ ENTRY(secondary_startup_64)
* secondary CPU, initial_gs should be set to its pda address
* before the CPU runs this code.
*
- * On UP, initial_gs points to _boot_cpu_pda and doesn't
+ * On UP, initial_gs points to PER_CPU_VAR(__pda) and doesn't
* change.
*/
movl $MSR_GS_BASE,%ecx
@@ -284,7 +285,7 @@ ENTRY(secondary_startup_64)
#ifdef CONFIG_SMP
.quad __per_cpu_load
#else
- .quad _boot_cpu_pda
+ .quad PER_CPU_VAR(__pda)
#endif
__FINITDATA
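A side note on the initial_gs comment above: the startup code loads this value into MSR_GS_BASE with wrmsr, which consumes the 64-bit value as the EDX:EAX register pair. Below is a userspace-only illustration of that split; the address used is made up.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t initial_gs = 0xffffffff80b4c000ULL;	/* hypothetical address */
	uint32_t eax = (uint32_t)initial_gs;		/* low 32 bits  */
	uint32_t edx = (uint32_t)(initial_gs >> 32);	/* high 32 bits */

	printf("wrmsr(MSR_GS_BASE): edx=%08x eax=%08x\n", edx, eax);
	return 0;
}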
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index be1ff34db112..daeedf82c15f 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -66,6 +66,16 @@ static void __init setup_node_to_cpumask_map(void);
static inline void setup_node_to_cpumask_map(void) { }
#endif
+/*
+ * Define load_pda_offset() and per-cpu __pda for x86_64.
+ * load_pda_offset() is responsible for loading the offset of pda into
+ * %gs.
+ *
+ * On SMP, pda offset also duals as percpu base address and thus it
+ * should be at the start of per-cpu area. To achieve this, it's
+ * preallocated in vmlinux_64.lds.S directly instead of using
+ * DEFINE_PER_CPU().
+ */
#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
@@ -74,6 +84,10 @@ void __cpuinit load_pda_offset(int cpu)
wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
mb();
}
+#ifndef CONFIG_SMP
+DEFINE_PER_CPU(struct x8664_pda, __pda);
+EXPORT_PER_CPU_SYMBOL(__pda);
+#endif
#endif /* CONFIG_SMP && CONFIG_X86_64 */
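The comment above states the key constraint of the patch: on SMP the pda must sit at offset 0 of each CPU's percpu area, so that the single base value load_pda_offset() writes into MSR_GS_BASE serves both as the pda pointer and as the percpu base. A compilable userspace sketch of that dual role follows; the layout and names are invented.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pda_sim { long cpunumber; };

/* invented per-CPU area layout: pda first, other percpu variables after it */
struct percpu_area_sim {
	struct pda_sim pda;		/* at offset 0, as the linker script enforces */
	long some_percpu_var;
};

static struct percpu_area_sim areas[2];

int main(void)
{
	for (int cpu = 0; cpu < 2; cpu++) {
		/* the one value the kernel would load into MSR_GS_BASE */
		uintptr_t gs_base = (uintptr_t)&areas[cpu];

		/* usable as a pointer to this CPU's pda ... */
		((struct pda_sim *)gs_base)->cpunumber = cpu;

		/* ... and as the base address for any other percpu variable */
		long *var = (long *)(gs_base +
				     offsetof(struct percpu_area_sim, some_percpu_var));
		*var = 100 + cpu;
	}

	printf("cpu1: cpunumber=%ld var=%ld\n",
	       areas[1].pda.cpunumber, areas[1].some_percpu_var);
	return 0;
}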
@@ -180,8 +194,6 @@ void __init setup_per_cpu_areas(void)
memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
per_cpu_offset(cpu) = ptr - __per_cpu_start;
#ifdef CONFIG_X86_64
- cpu_pda(cpu) = (void *)ptr;
-
/*
* CPU0 modified pda in the init data area, reload pda
* offset for CPU0 and clear the area for others.
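The hunk above keeps the existing per-CPU setup: each CPU receives a copy of the static percpu template and per_cpu_offset(cpu) records how far that copy sits from __per_cpu_start. The explicit cpu_pda(cpu) pointer assignment is no longer needed because the pda is now simply another percpu variable inside that copy. A rough userspace model of the copy-and-offset step is below, with all names and sizes invented.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS_SIM 2

/* invented stand-in for the static percpu template, __per_cpu_start.._end */
static char percpu_template[64] = "initial percpu data";

static uintptr_t per_cpu_offset_sim[NR_CPUS_SIM];

int main(void)
{
	for (int cpu = 0; cpu < NR_CPUS_SIM; cpu++) {
		/* each CPU gets its own copy of the template ... */
		char *ptr = malloc(sizeof(percpu_template));
		memcpy(ptr, percpu_template, sizeof(percpu_template));

		/* ... and the distance from the template is recorded as the offset */
		per_cpu_offset_sim[cpu] =
			(uintptr_t)ptr - (uintptr_t)percpu_template;
	}

	/* a percpu access then becomes "template address + this CPU's offset" */
	char *cpu1_copy = (char *)((uintptr_t)percpu_template +
				   per_cpu_offset_sim[1]);
	printf("cpu1 copy holds: %s\n", cpu1_copy);
	return 0;
}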
diff --git a/arch/x86/kernel/vmlinux_64.lds.S b/arch/x86/kernel/vmlinux_64.lds.S
index 962f21f1d4d7..d2a0baa87d1b 100644
--- a/arch/x86/kernel/vmlinux_64.lds.S
+++ b/arch/x86/kernel/vmlinux_64.lds.S
@@ -217,10 +217,12 @@ SECTIONS
* percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
* output PHDR, so the next output section - __data_nosave - should
* switch it back to data.init. Also, pda should be at the head of
- * percpu area. Preallocate it.
+ * percpu area. Preallocate it and define the percpu offset symbol
+ * so that it can be accessed as a percpu variable.
*/
. = ALIGN(PAGE_SIZE);
PERCPU_VADDR_PREALLOC(0, :percpu, pda_size)
+ per_cpu____pda = __per_cpu_start;
#else
PERCPU(PAGE_SIZE)
#endif
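The new linker-script symbol works because, in this kernel generation, DEFINE_PER_CPU(type, name) emits its variable under the symbol name per_cpu__##name; defining per_cpu____pda by hand at __per_cpu_start therefore makes the preallocated pda reachable through the normal percpu accessors. A tiny compile-and-run illustration of the name-pasting convention, with an invented macro and variable, is below.

#include <stdio.h>

/* simplified stand-in for that era's DEFINE_PER_CPU(): paste a per_cpu__ prefix */
#define DEFINE_PER_CPU_SIM(type, name)	type per_cpu__##name

DEFINE_PER_CPU_SIM(int, __pda_sim);	/* the linker-visible symbol is per_cpu____pda_sim */

int main(void)
{
	per_cpu____pda_sim = 42;	/* same pasted name, four underscores in a row */
	printf("per_cpu____pda_sim = %d\n", per_cpu____pda_sim);
	return 0;
}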