author    Peter Zijlstra <peterz@infradead.org>  2008-04-29 12:02:46 +0400
committer Ingo Molnar <mingo@elte.hu>            2008-05-06 01:56:18 +0400
commit    b328ca182f01c2a04b85e0ee8a410720b104fbcc (patch)
tree      d04ba3472a8f57788fb7e6e614e8e6b3c5a79b55 /kernel
parent    104f64549c961a797ff5f7c59946a7caa335c5b0 (diff)
download  linux-b328ca182f01c2a04b85e0ee8a410720b104fbcc.tar.xz
sched: fix hrtick_start_fair and CPU-Hotplug
Gautham R Shenoy reported:

 > While running the usual CPU-Hotplug stress tests on linux-2.6.25,
 > I noticed the following in the console logs.
 >
 > This is a wee bit difficult to reproduce. In the past 10 runs I hit this
 > only once.
 >
 > ------------[ cut here ]------------
 >
 > WARNING: at kernel/sched.c:962 hrtick+0x2e/0x65()
 >
 > Just wondering if we are doing a good job at handling the cancellation
 > of any per-cpu scheduler timers during CPU-Hotplug.

It looks like the per-CPU hrtick timer is indeed not cancelled at all and is
instead migrated to another CPU. Fix it via a proper hotplug notifier
mechanism.

Reported-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: stable@kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
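The fix below registers a CPU hotplug notifier so the scheduler's per-CPU hrtick timer is stopped before its CPU goes away, instead of relying on implicit cancellation. As a point of reference, here is a minimal sketch of that same notifier pattern applied to a generic per-CPU hrtimer. The names my_timers, my_hotplug_cb and my_init are hypothetical, and the hotcpu_notifier() interface shown is the old-style one used by kernels of this era; this is an illustration of the pattern, not part of the patch.

/*
 * Sketch: cancel a per-CPU hrtimer via the CPU hotplug notifier chain.
 * Hypothetical names; 2.6.25-era hotplug notifier API.
 */
#include <linux/cpu.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct hrtimer, my_timers);

static int my_hotplug_cb(struct notifier_block *nfb,
			 unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		/*
		 * Stop the timer belonging to the CPU that is going away,
		 * so it cannot fire on (or be migrated to) another CPU.
		 */
		hrtimer_cancel(&per_cpu(my_timers, cpu));
		return NOTIFY_OK;

	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		/* CPU is back; the normal code paths may re-arm the timer. */
		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static int __init my_init(void)
{
	int cpu;

	/* Initialize one timer per possible CPU. */
	for_each_possible_cpu(cpu)
		hrtimer_init(&per_cpu(my_timers, cpu),
			     CLOCK_MONOTONIC, HRTIMER_MODE_REL);

	/* Register for CPU hotplug events (old-style notifier, priority 0). */
	hotcpu_notifier(my_hotplug_cb, 0);
	return 0;
}
core_initcall(my_init);

The actual patch goes one step further: it also sets an HRTICK_BLOCK flag under rq->lock so that hrtick_enabled() refuses to re-arm the timer while the CPU is being taken down.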
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched.c  66
1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index d941ddc9ec1d..bee9cbe13c15 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1191,6 +1191,7 @@ static inline void resched_rq(struct rq *rq)
enum {
HRTICK_SET, /* re-programm hrtick_timer */
HRTICK_RESET, /* not a new slice */
+ HRTICK_BLOCK, /* stop hrtick operations */
};
/*
@@ -1202,6 +1203,8 @@ static inline int hrtick_enabled(struct rq *rq)
{
if (!sched_feat(HRTICK))
return 0;
+ if (unlikely(test_bit(HRTICK_BLOCK, &rq->hrtick_flags)))
+ return 0;
return hrtimer_is_hres_active(&rq->hrtick_timer);
}
@@ -1284,7 +1287,63 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-static inline void init_rq_hrtick(struct rq *rq)
+static void hotplug_hrtick_disable(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ rq->hrtick_flags = 0;
+ __set_bit(HRTICK_BLOCK, &rq->hrtick_flags);
+ spin_unlock_irqrestore(&rq->lock, flags);
+
+ hrtick_clear(rq);
+}
+
+static void hotplug_hrtick_enable(int cpu)
+{
+ struct rq *rq = cpu_rq(cpu);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rq->lock, flags);
+ __clear_bit(HRTICK_BLOCK, &rq->hrtick_flags);
+ spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static int
+hotplug_hrtick(struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ int cpu = (int)(long)hcpu;
+
+ switch (action) {
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ hotplug_hrtick_disable(cpu);
+ return NOTIFY_OK;
+
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+ hotplug_hrtick_enable(cpu);
+ return NOTIFY_OK;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static void init_hrtick(void)
+{
+ hotcpu_notifier(hotplug_hrtick, 0);
+}
+
+static void init_rq_hrtick(struct rq *rq)
{
rq->hrtick_flags = 0;
hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -1321,6 +1380,10 @@ static inline void init_rq_hrtick(struct rq *rq)
void hrtick_resched(void)
{
}
+
+static inline void init_hrtick(void)
+{
+}
#endif
/*
@@ -7943,6 +8006,7 @@ void __init sched_init_smp(void)
put_online_cpus();
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
+ init_hrtick();
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed_ptr(current, &non_isolated_cpus) < 0)