Diffstat (limited to 'arch/arm/kernel/smp.c')
 arch/arm/kernel/smp.c | 53 +++++++++++++++++++++++++++++------------------------
 1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index cdeb727527d3..addbbe8028c2 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -58,6 +58,8 @@ enum ipi_msg_type {
IPI_CPU_STOP,
};
+static DECLARE_COMPLETION(cpu_running);
+
int __cpuinit __cpu_up(unsigned int cpu)
{
struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -98,20 +100,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
*/
ret = boot_secondary(cpu, idle);
if (ret == 0) {
- unsigned long timeout;
-
/*
* CPU was successfully started, wait for it
* to come online or time out.
*/
- timeout = jiffies + HZ;
- while (time_before(jiffies, timeout)) {
- if (cpu_online(cpu))
- break;
-
- udelay(10);
- barrier();
- }
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
if (!cpu_online(cpu)) {
pr_crit("CPU%u: failed to come online\n", cpu);
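
The hunk above swaps an open-coded udelay()/cpu_online() polling loop for a sleeping wait on a completion. A minimal sketch of the same handshake, built only from the APIs visible in this diff (DECLARE_COMPLETION(), wait_for_completion_timeout(), msecs_to_jiffies(), complete()); the peer_running name and the two helpers are illustrative, not kernel API:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(peer_running);

/* boot-CPU side: sleep until the peer signals, or give up after 1s */
static int wait_for_peer(void)
{
	/* wait_for_completion_timeout() returns 0 on timeout,
	 * otherwise the number of jiffies left */
	if (!wait_for_completion_timeout(&peer_running,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}

/* secondary-CPU side: publish our state first, then wake the waiter */
static void signal_peer(void)
{
	complete(&peer_running);
}

Note that __cpu_up() above ignores the return value and re-checks cpu_online() instead, so a CPU that comes online just after the timeout is still treated as a success.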
@@ -246,6 +240,8 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
store_cpu_topology(cpuid);
}
+static void percpu_timer_setup(void);
+
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@@ -286,22 +282,16 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
/*
* OK, now it's safe to let the boot CPU continue. Wait for
* the CPU migration code to notice that the CPU is online
- * before we continue.
+ * before we continue - which happens after __cpu_up returns.
*/
set_cpu_online(cpu, true);
+ complete(&cpu_running);
/*
* Setup the percpu timer for this CPU.
*/
percpu_timer_setup();
- while (!cpu_active(cpu))
- cpu_relax();
-
- /*
- * cpu_active bit is set, so it's safe to enable interrupts
- * now.
- */
local_irq_enable();
local_fiq_enable();
@@ -359,7 +349,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
* re-initialize the map in platform_smp_prepare_cpus() if
* present != possible (e.g. physical hotplug).
*/
- init_cpu_present(&cpu_possible_map);
+ init_cpu_present(cpu_possible_mask);
/*
* Initialise the SCU if there is more than one CPU
@@ -459,7 +449,20 @@ static void __cpuinit broadcast_timer_setup(struct clock_event_device *evt)
clockevents_register_device(evt);
}
-void __cpuinit percpu_timer_setup(void)
+static struct local_timer_ops *lt_ops;
+
+#ifdef CONFIG_LOCAL_TIMERS
+int local_timer_register(struct local_timer_ops *ops)
+{
+ if (lt_ops)
+ return -EBUSY;
+
+ lt_ops = ops;
+ return 0;
+}
+#endif
+
+static void __cpuinit percpu_timer_setup(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
@@ -467,7 +470,7 @@ void __cpuinit percpu_timer_setup(void)
evt->cpumask = cpumask_of(cpu);
evt->broadcast = smp_timer_broadcast;
- if (local_timer_setup(evt))
+ if (!lt_ops || lt_ops->setup(evt))
broadcast_timer_setup(evt);
}
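
local_timer_register() lets a platform driver hand its per-CPU timer ops to the core, replacing the old hard-wired local_timer_setup()/local_timer_stop() calls. A sketch of how a driver might register, assuming the two-callback local_timer_ops layout implied by the lt_ops->setup()/lt_ops->stop() calls in this diff; the my_* names are hypothetical:

#include <linux/clockchips.h>
#include <linux/init.h>
#include <asm/localtimer.h>

static int __cpuinit my_lt_setup(struct clock_event_device *evt)
{
	/* program this CPU's private timer, then register evt */
	return 0;
}

static void my_lt_stop(struct clock_event_device *evt)
{
	/* mask this CPU's private timer interrupt */
}

static struct local_timer_ops my_lt_ops __cpuinitdata = {
	.setup	= my_lt_setup,
	.stop	= my_lt_stop,
};

static int __init my_timer_init(void)
{
	/* fails with -EBUSY if another driver got there first */
	return local_timer_register(&my_lt_ops);
}

Because percpu_timer_setup() falls back to broadcast_timer_setup() when lt_ops is NULL or setup() fails, platforms without a per-CPU timer keep working unchanged.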
@@ -482,7 +485,8 @@ static void percpu_timer_stop(void)
unsigned int cpu = smp_processor_id();
struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
- local_timer_stop(evt);
+ if (lt_ops)
+ lt_ops->stop(evt);
}
#endif
@@ -577,8 +581,9 @@ void smp_send_stop(void)
unsigned long timeout;
if (num_online_cpus() > 1) {
- cpumask_t mask = cpu_online_map;
- cpu_clear(smp_processor_id(), mask);
+ struct cpumask mask;
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
smp_cross_call(&mask, IPI_CPU_STOP);
}
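
The final hunk replaces the deprecated cpu_online_map/cpu_clear() pair with the struct cpumask accessors. A sketch of the resulting "every online CPU but myself" idiom (the helper name is made up):

#include <linux/cpumask.h>
#include <linux/smp.h>

static void build_ipi_mask(struct cpumask *mask)
{
	/* start from all online CPUs, then drop the calling CPU */
	cpumask_copy(mask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), mask);
}

An on-stack struct cpumask is fine at typical ARM NR_CPUS values; configurations with CONFIG_CPUMASK_OFFSTACK=y would normally use alloc_cpumask_var() instead.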