Mirror of git://git.yoctoproject.org/linux-yocto.git (synced 2025-10-23 07:23:12 +02:00)
Merge tag 'timers-urgent-2025-05-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc timers fixes from Ingo Molnar:

 - Fix time keeping bugs in CLOCK_MONOTONIC_COARSE clocks

 - Work around absolute relocations into vDSO code that GCC erroneously
   emits in certain arm64 build environments

 - Fix a false positive lockdep warning in the i8253 clocksource driver

* tag 'timers-urgent-2025-05-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  clocksource/i8253: Use raw_spinlock_irqsave() in clockevent_i8253_disable()
  arm64: vdso: Work around invalid absolute relocations from GCC
  timekeeping: Prevent coarse clocks going backwards

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Commit: ac814cbbab
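The timekeeping change in this pull prevents CLOCK_MONOTONIC_COARSE and CLOCK_REALTIME_COARSE from stepping backwards by a few nanoseconds when adjtimex() tweaks the clock multiplier. A minimal userspace sketch of the symptom check is below; it is illustrative only (it would need NTP/adjtimex activity in the background to have any chance of catching the old behaviour) and is not a kernel selftest.

/* Illustrative check: sample CLOCK_MONOTONIC_COARSE in a loop and report
 * any backwards step. Assumes a Linux/glibc environment. */
#define _GNU_SOURCE
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static int64_t ts_ns(const struct timespec *ts)
{
	return (int64_t)ts->tv_sec * 1000000000LL + ts->tv_nsec;
}

int main(void)
{
	struct timespec prev, cur;

	clock_gettime(CLOCK_MONOTONIC_COARSE, &prev);
	for (long i = 0; i < 50000000; i++) {
		clock_gettime(CLOCK_MONOTONIC_COARSE, &cur);
		if (ts_ns(&cur) < ts_ns(&prev)) {
			fprintf(stderr, "backwards step of %lld ns\n",
				(long long)(ts_ns(&prev) - ts_ns(&cur)));
			return 1;
		}
		prev = cur;
	}
	puts("no backwards step observed");
	return 0;
}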
@@ -99,6 +99,19 @@ static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
 	return res;
 }
 
+#if IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB)
+static __always_inline const struct vdso_time_data *__arch_get_vdso_u_time_data(void)
+{
+	const struct vdso_time_data *ret = &vdso_u_time_data;
+
+	/* Work around invalid absolute relocations */
+	OPTIMIZER_HIDE_VAR(ret);
+
+	return ret;
+}
+#define __arch_get_vdso_u_time_data __arch_get_vdso_u_time_data
+#endif /* IS_ENABLED(CONFIG_CC_IS_GCC) && IS_ENABLED(CONFIG_PAGE_SIZE_64KB) */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
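The new __arch_get_vdso_u_time_data() takes the address of vdso_u_time_data and then passes it through OPTIMIZER_HIDE_VAR(), so GCC can no longer fold the symbol address into an absolute relocation inside the vDSO. Below is a minimal standalone sketch of that idiom; the macro here is a simplified stand-in for the kernel's compiler.h helper, not a copy of it.

/* Simplified stand-in for OPTIMIZER_HIDE_VAR(): an empty asm that forces the
 * pointer through a register, making its value opaque to the optimizer. */
#define OPTIMIZER_HIDE_VAR(var) __asm__ volatile("" : "+r" (var))

struct vdso_time_data { unsigned long pad; };

static struct vdso_time_data vdso_u_time_data;

static const struct vdso_time_data *get_time_data(void)
{
	const struct vdso_time_data *ret = &vdso_u_time_data;

	/* Without this, the compiler may fold &vdso_u_time_data into an
	 * absolute reference instead of a position-independent one. */
	OPTIMIZER_HIDE_VAR(ret);
	return ret;
}

int main(void)
{
	return get_time_data() == &vdso_u_time_data ? 0 : 1;
}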
@@ -103,7 +103,7 @@ int __init clocksource_i8253_init(void)
 #ifdef CONFIG_CLKEVT_I8253
 void clockevent_i8253_disable(void)
 {
-	raw_spin_lock(&i8253_lock);
+	guard(raw_spinlock_irqsave)(&i8253_lock);
 
 	/*
 	 * Writing the MODE register should stop the counter, according to
@@ -132,8 +132,6 @@ void clockevent_i8253_disable(void)
 	outb_p(0, PIT_CH0);
 
 	outb_p(0x30, PIT_MODE);
-
-	raw_spin_unlock(&i8253_lock);
 }
 
 static int pit_shutdown(struct clock_event_device *evt)
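The i8253 hunks swap the plain raw_spin_lock()/raw_spin_unlock() pair for guard(raw_spinlock_irqsave)(), the scope-based lock guard from the kernel's cleanup.h: it disables interrupts while the lock is held (which is what silences the lockdep false positive) and releases the lock automatically when the function returns. The runnable userspace sketch below models only the scope-based release half of that, using GCC's cleanup attribute with a pthread mutex; the names and the macro are illustrative, not the kernel API.

/* Scope-based lock guard sketch: the cleanup attribute runs the unlock
 * handler when the guard variable goes out of scope, on every return path. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;

static void unlock_on_exit(pthread_mutex_t **lockp)
{
	pthread_mutex_unlock(*lockp);
	puts("lock released at end of scope");
}

#define scoped_mutex_guard(lock) \
	pthread_mutex_t *guard_var __attribute__((cleanup(unlock_on_exit))) = \
		(pthread_mutex_lock(lock), (lock))

static void critical_section(void)
{
	scoped_mutex_guard(&demo_lock);
	puts("inside critical section");
	/* No explicit unlock needed here. */
}

int main(void)
{
	critical_section();
	return 0;
}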
@@ -51,7 +51,7 @@ struct tk_read_base {
  * @offs_real:		Offset clock monotonic -> clock realtime
  * @offs_boot:		Offset clock monotonic -> clock boottime
  * @offs_tai:		Offset clock monotonic -> clock tai
- * @tai_offset:		The current UTC to TAI offset in seconds
+ * @coarse_nsec:	The nanoseconds part for coarse time getters
  * @tkr_raw:		The readout base structure for CLOCK_MONOTONIC_RAW
  * @raw_sec:		CLOCK_MONOTONIC_RAW time in seconds
  * @clock_was_set_seq:	The sequence number of clock was set events
@@ -76,6 +76,7 @@ struct tk_read_base {
  *				ntp shifted nano seconds.
  * @ntp_err_mult:	Multiplication factor for scaled math conversion
  * @skip_second_overflow: Flag used to avoid updating NTP twice with same second
+ * @tai_offset:		The current UTC to TAI offset in seconds
  *
  * Note: For timespec(64) based interfaces wall_to_monotonic is what
  * we need to add to xtime (or xtime corrected for sub jiffy times)
@@ -100,7 +101,7 @@ struct tk_read_base {
  * which results in the following cacheline layout:
  *
  * 0:	seqcount, tkr_mono
- * 1:	xtime_sec ... tai_offset
+ * 1:	xtime_sec ... coarse_nsec
  * 2:	tkr_raw, raw_sec
  * 3,4: Internal variables
  *
@@ -121,7 +122,7 @@ struct timekeeper {
 	ktime_t			offs_real;
 	ktime_t			offs_boot;
 	ktime_t			offs_tai;
-	s32			tai_offset;
+	u32			coarse_nsec;
 
 	/* Cacheline 2: */
 	struct tk_read_base	tkr_raw;
@@ -144,6 +145,7 @@ struct timekeeper {
 	u32			ntp_error_shift;
 	u32			ntp_err_mult;
 	u32			skip_second_overflow;
+	s32			tai_offset;
 };
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
@@ -164,10 +164,34 @@ static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
 	return ts;
 }
 
+static inline struct timespec64 tk_xtime_coarse(const struct timekeeper *tk)
+{
+	struct timespec64 ts;
+
+	ts.tv_sec = tk->xtime_sec;
+	ts.tv_nsec = tk->coarse_nsec;
+	return ts;
+}
+
+/*
+ * Update the nanoseconds part for the coarse time keepers. They can't rely
+ * on xtime_nsec because xtime_nsec could be adjusted by a small negative
+ * amount when the multiplication factor of the clock is adjusted, which
+ * could cause the coarse clocks to go slightly backwards. See
+ * timekeeping_apply_adjustment(). Thus we keep a separate copy for the coarse
+ * clockids which only is updated when the clock has been set or we have
+ * accumulated time.
+ */
+static inline void tk_update_coarse_nsecs(struct timekeeper *tk)
+{
+	tk->coarse_nsec = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+}
+
 static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
 {
 	tk->xtime_sec = ts->tv_sec;
 	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
+	tk_update_coarse_nsecs(tk);
 }
 
 static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
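The comment added above explains why the coarse getters cannot keep deriving their nanoseconds from tkr_mono.xtime_nsec on every read: a multiplier adjustment can nudge the shifted value down slightly. A tiny self-contained illustration of that effect, with made-up numbers (this is not kernel code):

/* Toy illustration of why the coarse getters snapshot their nanoseconds:
 * a small negative correction to the shifted xtime_nsec makes the derived
 * value step back, while a snapshot taken earlier stays put. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int shift = 8;                /* assumed clocksource shift */
	uint64_t xtime_nsec = 5000u << shift;        /* 5000 ns, in shifted form */
	uint32_t coarse_nsec = xtime_nsec >> shift;  /* snapshot: 5000 ns */

	/* A frequency adjustment nudges the shifted value down a little. */
	xtime_nsec -= 3u << shift;

	printf("derived value: %llu ns (went backwards)\n",
	       (unsigned long long)(xtime_nsec >> shift));   /* prints 4997 */
	printf("coarse snapshot: %u ns (stable)\n", coarse_nsec); /* prints 5000 */
	return 0;
}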
@@ -175,6 +199,7 @@ static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
 	tk->xtime_sec += ts->tv_sec;
 	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
 	tk_normalize_xtime(tk);
+	tk_update_coarse_nsecs(tk);
 }
 
 static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
@@ -708,6 +733,7 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 		tk_normalize_xtime(tk);
 		delta -= incr;
 	}
+	tk_update_coarse_nsecs(tk);
 }
 
 /**
@@ -804,8 +830,8 @@ EXPORT_SYMBOL_GPL(ktime_get_with_offset);
 ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
-	unsigned int seq;
 	ktime_t base, *offset = offsets[offs];
+	unsigned int seq;
 	u64 nsecs;
 
 	WARN_ON(timekeeping_suspended);
@@ -813,7 +839,7 @@ ktime_t ktime_get_coarse_with_offset(enum tk_offsets offs)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 		base = ktime_add(tk->tkr_mono.base, *offset);
-		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+		nsecs = tk->coarse_nsec;
 
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -2161,7 +2187,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 	struct timekeeper *real_tk = &tk_core.timekeeper;
 	unsigned int clock_set = 0;
 	int shift = 0, maxshift;
-	u64 offset;
+	u64 offset, orig_offset;
 
 	guard(raw_spinlock_irqsave)(&tk_core.lock);
 
@@ -2172,7 +2198,7 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask,
 				   tk->tkr_mono.clock->max_raw_delta);
-
+	orig_offset = offset;
 	/* Check if there's really nothing to do */
 	if (offset < real_tk->cycle_interval && mode == TK_ADV_TICK)
 		return false;
@@ -2205,6 +2231,14 @@ static bool timekeeping_advance(enum timekeeping_adv_mode mode)
 	 */
 	clock_set |= accumulate_nsecs_to_secs(tk);
 
+	/*
+	 * To avoid inconsistencies caused adjtimex TK_ADV_FREQ calls
+	 * making small negative adjustments to the base xtime_nsec
+	 * value, only update the coarse clocks if we accumulated time
+	 */
+	if (orig_offset != offset)
+		tk_update_coarse_nsecs(tk);
+
 	timekeeping_update_from_shadow(&tk_core, clock_set);
 
 	return !!clock_set;
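The orig_offset check above refreshes the coarse snapshot only when the accumulation loop actually consumed cycles; an adjtimex-driven TK_ADV_FREQ call that lands mid-tick leaves offset equal to orig_offset, so a just-applied negative xtime_nsec correction is not propagated to the coarse clocks. A toy model of that condition (illustrative numbers only, not kernel code):

/* Toy model of the "did we accumulate time?" condition guarding the coarse
 * snapshot update in timekeeping_advance(). */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static bool accumulated_time(uint64_t offset, uint64_t cycle_interval)
{
	uint64_t orig_offset = offset;

	/* Very rough stand-in for the shift-based accumulation loop. */
	while (offset >= cycle_interval)
		offset -= cycle_interval;

	return orig_offset != offset;  /* only true if cycles were consumed */
}

int main(void)
{
	/* TK_ADV_FREQ arriving mid-tick: nothing accumulated, snapshot kept. */
	printf("mid-tick adjustment: accumulate=%d\n", accumulated_time(300, 1000));
	/* Normal tick with enough elapsed cycles: snapshot refreshed. */
	printf("full tick: accumulate=%d\n", accumulated_time(2300, 1000));
	return 0;
}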
@@ -2248,7 +2282,7 @@ void ktime_get_coarse_real_ts64(struct timespec64 *ts)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		*ts = tk_xtime(tk);
+		*ts = tk_xtime_coarse(tk);
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 }
 EXPORT_SYMBOL(ktime_get_coarse_real_ts64);
@@ -2271,7 +2305,7 @@ void ktime_get_coarse_real_ts64_mg(struct timespec64 *ts)
 
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
-		*ts = tk_xtime(tk);
+		*ts = tk_xtime_coarse(tk);
 		offset = tk_core.timekeeper.offs_real;
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -2350,7 +2384,7 @@ void ktime_get_coarse_ts64(struct timespec64 *ts)
 	do {
 		seq = read_seqcount_begin(&tk_core.seq);
 
-		now = tk_xtime(tk);
+		now = tk_xtime_coarse(tk);
 		mono = tk->wall_to_monotonic;
 	} while (read_seqcount_retry(&tk_core.seq, seq));
 
@@ -98,12 +98,12 @@ void update_vsyscall(struct timekeeper *tk)
 	/* CLOCK_REALTIME_COARSE */
 	vdso_ts		= &vc[CS_HRES_COARSE].basetime[CLOCK_REALTIME_COARSE];
 	vdso_ts->sec	= tk->xtime_sec;
-	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	vdso_ts->nsec	= tk->coarse_nsec;
 
 	/* CLOCK_MONOTONIC_COARSE */
 	vdso_ts		= &vc[CS_HRES_COARSE].basetime[CLOCK_MONOTONIC_COARSE];
 	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
+	nsec		= tk->coarse_nsec;
 	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
 	vdso_ts->sec	+= __iter_div_u64_rem(nsec, NSEC_PER_SEC, &vdso_ts->nsec);
 
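update_vsyscall() now publishes tk->coarse_nsec for the two coarse clock IDs, and the userspace vDSO fast path picks the values up under the usual sequence counter. The sketch below is a simplified, self-contained model of such a coarse read loop (modeled loosely on the generic vDSO gettimeofday code; the types, helpers, and missing memory barriers are deliberate simplifications):

/* Simplified model of a coarse vDSO clock read: the kernel publishes sec/nsec
 * (now taken from tk->coarse_nsec) under a sequence counter and userspace
 * retries if an update raced with the read. Mocked types, single-threaded. */
#include <stdint.h>
#include <stdio.h>

struct vdso_timestamp { uint64_t sec; uint64_t nsec; };
struct vdso_data { volatile uint32_t seq; struct vdso_timestamp coarse; };

static uint32_t vdso_read_begin(const struct vdso_data *vd)
{
	uint32_t seq;

	while ((seq = vd->seq) & 1)   /* odd count: update in progress */
		;
	return seq;
}

static int vdso_read_retry(const struct vdso_data *vd, uint32_t start)
{
	return vd->seq != start;      /* counter moved: retry the read */
}

static void do_coarse(const struct vdso_data *vd, uint64_t *sec, uint64_t *nsec)
{
	uint32_t seq;

	do {
		seq = vdso_read_begin(vd);
		*sec = vd->coarse.sec;
		*nsec = vd->coarse.nsec;
	} while (vdso_read_retry(vd, seq));
}

int main(void)
{
	struct vdso_data vd = { .seq = 2, .coarse = { .sec = 100, .nsec = 250000 } };
	uint64_t sec, nsec;

	do_coarse(&vd, &sec, &nsec);
	printf("%llu.%09llu\n", (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}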