path: root/kernel/hrtimer.c
author	Bodo Stroesser <bstroesser@fujitsu-siemens.com>	2008-04-27 01:10:16 +0400
committer	Thomas Gleixner <tglx@linutronix.de>	2008-04-27 20:26:43 +0400
commit	d7b41a24bfb5d7fa02f7b49be1293d468814e424 (patch)
tree	37c206c5125aca1084b8c4d76f80c5de4dec7c38 /kernel/hrtimer.c
parent	c3bf9bc243092c53946fd6d8ebd6dc2f4e572d48 (diff)
download	linux-d7b41a24bfb5d7fa02f7b49be1293d468814e424.tar.xz
hrtimer: timeout too long when using HRTIMER_CB_SOFTIRQ
When using hrtimer with timer->cb_mode == HRTIMER_CB_SOFTIRQ, in some cases the clockevent is not programmed. This happens if:

 - a timer is rearmed while its state is HRTIMER_STATE_CALLBACK
 - hrtimer_reprogram() returns -ETIME when it is called after the CALLBACK is finished. This occurs if the new timer->expires is in the past when the CALLBACK is done.

In this case, the timer needs to be removed from the tree and put onto the pending list again.

The patch is against 2.6.22.5, but AFAICS it is relevant for 2.6.25 as well (in run_hrtimer_pending()).

Signed-off-by: Bodo Stroesser <bstroesser@fujitsu-siemens.com>
Cc: stable@kernel.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
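For context (not part of the patch): a minimal, hypothetical sketch of the kind of HRTIMER_CB_SOFTIRQ usage that can run into this path, assuming the hrtimer API of that era (cb_mode field, HRTIMER_CB_SOFTIRQ). All names below are illustrative; the comments only restate the race described above, where a rearm with an almost-immediate expiry on another CPU, while the callback is still running, makes hrtimer_reprogram() fail with -ETIME.

/* Hypothetical demo module; names are illustrative only. */
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

/* Runs from the hrtimer softirq via run_hrtimer_pending(). */
static enum hrtimer_restart demo_cb(struct hrtimer *t)
{
	/*
	 * While this callback runs, cpu_base->lock is dropped, so another
	 * CPU may rearm the timer with hrtimer_start(). If that new expiry
	 * is already in the past when run_hrtimer_pending() reprograms the
	 * clockevent, hrtimer_reprogram() fails (-ETIME) and, before this
	 * fix, the timer stayed in the tree without the event device ever
	 * being programmed for it.
	 */
	return HRTIMER_NORESTART;
}

static int __init demo_init(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_cb;
	demo_timer.cb_mode = HRTIMER_CB_SOFTIRQ;	/* run callback in softirq */

	/*
	 * A very short relative expiry makes the -ETIME case easy to hit
	 * when a rearm like this races with the running callback.
	 */
	hrtimer_start(&demo_timer, ktime_set(0, 1000), HRTIMER_MODE_REL);
	return 0;
}

static void __exit demo_exit(void)
{
	hrtimer_cancel(&demo_timer);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With the change below, such a timer is moved back onto the cb_pending list instead of being left in the tree unprogrammed.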
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--	kernel/hrtimer.c	15
1 file changed, 13 insertions, 2 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f78777abe769..e379ef0e9c20 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1080,8 +1080,19 @@ static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
 			 * If the timer was rearmed on another CPU, reprogram
 			 * the event device.
 			 */
-			if (timer->base->first == &timer->node)
-				hrtimer_reprogram(timer, timer->base);
+			struct hrtimer_clock_base *base = timer->base;
+
+			if (base->first == &timer->node &&
+			    hrtimer_reprogram(timer, base)) {
+				/*
+				 * Timer is expired. Thus move it from tree to
+				 * pending list again.
+				 */
+				__remove_hrtimer(timer, base,
+						 HRTIMER_STATE_PENDING, 0);
+				list_add_tail(&timer->cb_entry,
+					      &base->cpu_base->cb_pending);
+			}
 		}
 	}
 	spin_unlock_irq(&cpu_base->lock);