author     Adrian Hunter <adrian.hunter@intel.com>   2024-03-25 09:40:21 +0300
committer  Thomas Gleixner <tglx@linutronix.de>      2024-04-08 16:03:08 +0300
commit     fcf190c369149c3b04539797cedf28741eb14164 (patch)
tree       9a2860f6c895ca93b5247686cd47b0617f778f2c /kernel/time
parent     e809a80aa0bcf802f99407c23fd6be6fd4eb250a (diff)
timekeeping: Make delta calculation overflow safe
Kernel timekeeping is designed to keep the change in cycles (since the last
timer interrupt) below max_cycles, which prevents multiplication overflow
when converting cycles to nanoseconds. However, if timer interrupts stop,
the calculation will eventually overflow.

Add protection against that: in the timekeeping_cycles_to_ns() calculation,
check against max_cycles and fall back to a slower, higher-precision
calculation; in timekeeping_forward_now(), process the delta in chunks of
at most max_cycles each.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-18-adrian.hunter@intel.com
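The failure mode is easy to demonstrate outside the kernel. Below is a minimal
userspace sketch (illustration only, not kernel code: mult = 2^22 and
shift = 22 are invented stand-ins for a real clocksource's conversion factors,
and unsigned __int128 assumes a 64-bit gcc/clang target). Once delta * mult no
longer fits in 64 bits, the plain product wraps silently, while a 128-bit
intermediate stays exact:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t mult = 1 << 22, shift = 22;   /* 1 ns per cycle */
        const uint64_t delta = UINT64_C(1) << 42;    /* interrupts stopped for ~73 min */

        /* 64-bit product wraps: 2^42 * 2^22 == 2^64 == 0 (mod 2^64) */
        uint64_t wrapped = (delta * mult) >> shift;

        /* 128-bit intermediate keeps every bit until after the shift */
        uint64_t exact = (uint64_t)(((unsigned __int128)delta * mult) >> shift);

        printf("wrapped: %llu ns\n", (unsigned long long)wrapped);
        printf("exact:   %llu ns\n", (unsigned long long)exact);
        return 0;
}

This prints "wrapped: 0 ns" against "exact: 4398046511104 ns", i.e. roughly 73
minutes of elapsed time silently lost by the unprotected conversion.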
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/timekeeping.c | 40 +++++++++++++++++++++++++++++++++-----------
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index d17484082e2c..111dfdbd488f 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -364,19 +364,32 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }
 
 /* Timekeeper helper functions. */
+static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
+{
+        return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
+}
+
 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
         /* Calculate the delta since the last update_wall_time() */
         u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
-        if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-                /*
-                 * Handle clocksource inconsistency between CPUs to prevent
-                 * time from going backwards by checking for the MSB of the
-                 * mask being set in the delta.
-                 */
-                if (unlikely(delta & ~(mask >> 1)))
-                        return tkr->xtime_nsec >> tkr->shift;
+        /*
+         * This detects the case where the delta overflows the multiplication
+         * with tkr->mult.
+         */
+        if (unlikely(delta > tkr->clock->max_cycles)) {
+                if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
+                        /*
+                         * Handle clocksource inconsistency between CPUs to prevent
+                         * time from going backwards by checking for the MSB of the
+                         * mask being set in the delta.
+                         */
+                        if (unlikely(delta & ~(mask >> 1)))
+                                return tkr->xtime_nsec >> tkr->shift;
+                }
+
+                return delta_to_ns_safe(tkr, delta);
         }
 
         return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
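Two design points are worth noting here. delta_to_ns_safe() is deliberately
noinline, so the rarely taken slow path does not bloat the inlined fast path.
And on 64-bit architectures with CONFIG_ARCH_SUPPORTS_INT128, the
mul_u64_u32_add_u64_shr() helper it calls (from include/linux/math64.h)
amounts to a widening multiply along the lines of this sketch; architectures
without 128-bit support build the same result from 32x32 partial products
instead:

/* sketch of the 128-bit variant; see include/linux/math64.h for the real one */
static inline u64 mul_u64_u32_add_u64_shr(u64 a, u32 mul, u64 b, unsigned int shift)
{
        /* full 128-bit product of a * mul, plus b, then the final shift */
        return (u64)(((unsigned __int128)a * mul + b) >> shift);
}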
@@ -789,10 +802,15 @@ static void timekeeping_forward_now(struct timekeeper *tk)
         tk->tkr_mono.cycle_last = cycle_now;
         tk->tkr_raw.cycle_last = cycle_now;
 
-        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-        tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+        while (delta > 0) {
+                u64 max = tk->tkr_mono.clock->max_cycles;
+                u64 incr = delta < max ? delta : max;
 
-        tk_normalize_xtime(tk);
+                tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
+                tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
+                tk_normalize_xtime(tk);
+                delta -= incr;
+        }
 }
 
/**
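The new loop in timekeeping_forward_now() leans on the same bound: incr never
exceeds max_cycles, so each incr * mult product fits in 64 bits, and running
tk_normalize_xtime() after every chunk drains whole seconds out of xtime_nsec
before the next chunk can push it toward overflow. A standalone sketch of the
pattern (values invented; the inner loop stands in for tk_normalize_xtime(),
which keeps xtime_nsec shifted left by the clocksource shift):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t mult = 1 << 22, shift = 22;          /* invented clocksource */
        const uint64_t max_cycles = UINT64_C(1) << 40;
        const uint64_t sec_shifted = UINT64_C(1000000000) << shift;
        uint64_t delta = UINT64_C(10) << 40;                /* ten full chunks */
        uint64_t xtime_nsec = 0, sec = 0;

        while (delta > 0) {
                uint64_t incr = delta < max_cycles ? delta : max_cycles;

                /* incr <= max_cycles = 2^40, so incr * mult <= 2^62: no overflow */
                xtime_nsec += incr * mult;

                /* like tk_normalize_xtime(): move whole seconds out of xtime_nsec */
                while (xtime_nsec >= sec_shifted) {
                        xtime_nsec -= sec_shifted;
                        sec++;
                }
                delta -= incr;
        }
        printf("advanced %llu s + %llu ns\n", (unsigned long long)sec,
               (unsigned long long)(xtime_nsec >> shift));
        return 0;
}

Multiplying the full delta in one go would need about 66 bits here and wrap in
a u64; the chunked loop never needs more than 64.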