Mirror of https://github.com/torvalds/linux
timekeeping: Make delta calculation overflow safe
Kernel timekeeping is designed to keep the change in cycles (since the
last timer interrupt) below max_cycles, which prevents multiplication
overflow when converting cycles to nanoseconds. However, if timer
interrupts stop, the calculation will eventually overflow.

Add protection against that. In the timekeeping_cycles_to_ns() calculation,
check against max_cycles, falling back to a slower, higher-precision
calculation. In timekeeping_forward_now(), process the delta in chunks of
at most max_cycles each.

Suggested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20240325064023.2997-18-adrian.hunter@intel.com
parent e809a80aa0
commit fcf190c369
kernel/time/timekeeping.c

@@ -364,19 +364,32 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }
 
 /* Timekeeper helper functions. */
+static noinline u64 delta_to_ns_safe(const struct tk_read_base *tkr, u64 delta)
+{
+        return mul_u64_u32_add_u64_shr(delta, tkr->mult, tkr->xtime_nsec, tkr->shift);
+}
+
 static inline u64 timekeeping_cycles_to_ns(const struct tk_read_base *tkr, u64 cycles)
 {
         /* Calculate the delta since the last update_wall_time() */
         u64 mask = tkr->mask, delta = (cycles - tkr->cycle_last) & mask;
 
-        if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
-                /*
-                 * Handle clocksource inconsistency between CPUs to prevent
-                 * time from going backwards by checking for the MSB of the
-                 * mask being set in the delta.
-                 */
-                if (unlikely(delta & ~(mask >> 1)))
-                        return tkr->xtime_nsec >> tkr->shift;
-        }
+        /*
+         * This detects the case where the delta overflows the multiplication
+         * with tkr->mult.
+         */
+        if (unlikely(delta > tkr->clock->max_cycles)) {
+                if (IS_ENABLED(CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE)) {
+                        /*
+                         * Handle clocksource inconsistency between CPUs to prevent
+                         * time from going backwards by checking for the MSB of the
+                         * mask being set in the delta.
+                         */
+                        if (unlikely(delta & ~(mask >> 1)))
+                                return tkr->xtime_nsec >> tkr->shift;
+                }
+
+                return delta_to_ns_safe(tkr, delta);
+        }
 
         return ((delta * tkr->mult) + tkr->xtime_nsec) >> tkr->shift;
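Why the new slow path is needed: delta * tkr->mult multiplies a u64 delta by a u32 mult entirely in 64 bits, so once delta exceeds max_cycles the product can wrap modulo 2^64 and the converted time would jump. Below is a minimal standalone userspace sketch of that failure and of the 128-bit rescue; the helper is a stand-in mirroring what the kernel's mul_u64_u32_add_u64_shr() computes, and the mult/shift values are made up for illustration (requires a compiler with unsigned __int128, e.g. GCC or Clang):

#include <stdint.h>
#include <stdio.h>

/*
 * Userspace stand-in for the kernel's mul_u64_u32_add_u64_shr():
 * computes (a * mul + b) >> shift with a 128-bit intermediate so
 * the product a * mul cannot wrap.
 */
static uint64_t safe_mul_shr(uint64_t a, uint32_t mul, uint64_t b,
                             unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul + b) >> shift);
}

int main(void)
{
        uint32_t mult = 1 << 22;        /* made-up clocksource mult */
        unsigned int shift = 22;        /* made-up clocksource shift */
        uint64_t xtime_nsec = 0;
        /* Smallest delta whose product with mult no longer fits in 64 bits: */
        uint64_t delta = (UINT64_MAX / mult) + 1;

        /* Fast path: delta * mult wraps modulo 2^64. */
        uint64_t fast = ((delta * mult) + xtime_nsec) >> shift;
        /* Safe path: full product kept in 128 bits. */
        uint64_t safe = safe_mul_shr(delta, mult, xtime_nsec, shift);

        printf("fast: %llu\nsafe: %llu\n",
               (unsigned long long)fast, (unsigned long long)safe);
        return 0;
}

Compiled and run, the fast path prints 0 while the safe path prints the true result of 2^42, which is the discrepancy the new delta > tkr->clock->max_cycles check routes around.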
@@ -789,10 +802,15 @@ static void timekeeping_forward_now(struct timekeeper *tk)
         tk->tkr_mono.cycle_last = cycle_now;
         tk->tkr_raw.cycle_last  = cycle_now;
 
-        tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
-        tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
+        while (delta > 0) {
+                u64 max = tk->tkr_mono.clock->max_cycles;
+                u64 incr = delta < max ? delta : max;
 
-        tk_normalize_xtime(tk);
+                tk->tkr_mono.xtime_nsec += incr * tk->tkr_mono.mult;
+                tk->tkr_raw.xtime_nsec += incr * tk->tkr_raw.mult;
+                tk_normalize_xtime(tk);
+                delta -= incr;
+        }
 }
 
 /**
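The timekeeping_forward_now() hunk takes the complementary approach: instead of converting one potentially huge delta, it consumes the delta in increments of at most max_cycles, so each incr * mult product stays below the 64-bit overflow bound. A standalone sketch of the same loop shape, using a made-up simplified state struct rather than the kernel's struct timekeeper:

#include <stdint.h>

/* Made-up, simplified stand-in for the timekeeper state. */
struct tk_sketch {
        uint64_t xtime_nsec;    /* accumulated shifted nanoseconds */
        uint32_t mult;          /* cycle-to-nanosecond multiplier */
        uint64_t max_cycles;    /* largest delta safe to multiply by mult */
};

/*
 * Same shape as the patched loop: split delta into chunks of at
 * most max_cycles so that incr * mult cannot overflow 64 bits.
 */
static void forward_delta(struct tk_sketch *tk, uint64_t delta)
{
        while (delta > 0) {
                uint64_t incr = delta < tk->max_cycles ? delta : tk->max_cycles;

                tk->xtime_nsec += incr * tk->mult;
                delta -= incr;
        }
}

Because the clocksource code sizes max_cycles so that max_cycles * mult fits in 64 bits (the invariant the commit message describes), no single iteration can wrap; the loop runs more than once only in the pathological case where timer interrupts stopped for a long stretch.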