mirror of
git://git.yoctoproject.org/linux-yocto.git
synced 2025-10-23 07:23:12 +02:00

Currently, ___ratelimit() treats a negative ->interval or ->burst as if it were zero, but this is an accident of the current implementation. Therefore, splat in this case, which might have the benefit of detecting use of uninitialized ratelimit_state structures on the one hand or easing addition of new features on the other. Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/ Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/ Signed-off-by: Petr Mladek <pmladek@suse.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Kuniyuki Iwashima <kuniyu@amazon.com> Cc: Mateusz Guzik <mjguzik@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: John Ogness <john.ogness@linutronix.de> Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
129 lines · 3.2 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
|
|
/*
|
|
* ratelimit.c - Do something with rate limit.
|
|
*
|
|
* Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com>
|
|
*
|
|
* 2008-05-01 rewrite the function and use a ratelimit_state data struct as
|
|
* parameter. Now every user can use their own standalone ratelimit_state.
|
|
*/
|
|
|
|
#include <linux/ratelimit.h>
|
|
#include <linux/jiffies.h>
|
|
#include <linux/export.h>
|
|
|
|
/*
|
|
* __ratelimit - rate limiting
|
|
* @rs: ratelimit_state data
|
|
* @func: name of calling function
|
|
*
|
|
* This enforces a rate limit: not more than @rs->burst callbacks
|
|
* in every @rs->interval
|
|
*
|
|
* RETURNS:
|
|
* 0 means callbacks will be suppressed.
|
|
* 1 means go ahead and do it.
|
|
*/
|
|
int ___ratelimit(struct ratelimit_state *rs, const char *func)
|
|
{
|
|
/* Paired with WRITE_ONCE() in .proc_handler().
|
|
* Changing two values seperately could be inconsistent
|
|
* and some message could be lost. (See: net_ratelimit_state).
|
|
*/
|
|
int interval = READ_ONCE(rs->interval);
|
|
int burst = READ_ONCE(rs->burst);
|
|
unsigned long flags;
|
|
int ret;
|
|
|
|
/*
|
|
* Zero interval says never limit, otherwise, non-positive burst
|
|
* says always limit.
|
|
*/
|
|
if (interval <= 0 || burst <= 0) {
|
|
WARN_ONCE(interval < 0 || burst < 0, "Negative interval (%d) or burst (%d): Uninitialized ratelimit_state structure?\n", interval, burst);
|
|
ret = interval == 0 || burst > 0;
|
|
if (!(READ_ONCE(rs->flags) & RATELIMIT_INITIALIZED) || (!interval && !burst) ||
|
|
!raw_spin_trylock_irqsave(&rs->lock, flags)) {
|
|
if (!ret)
|
|
ratelimit_state_inc_miss(rs);
|
|
return ret;
|
|
}
|
|
|
|
/* Force re-initialization once re-enabled. */
|
|
rs->flags &= ~RATELIMIT_INITIALIZED;
|
|
if (!ret)
|
|
ratelimit_state_inc_miss(rs);
|
|
goto unlock_ret;
|
|
}
|
|
|
|
/*
|
|
* If we contend on this state's lock then just check if
|
|
* the current burst is used or not. It might cause
|
|
* false positive when we are past the interval and
|
|
* the current lock owner is just about to reset it.
|
|
*/
|
|
if (!raw_spin_trylock_irqsave(&rs->lock, flags)) {
|
|
unsigned int rs_flags = READ_ONCE(rs->flags);
|
|
|
|
if (rs_flags & RATELIMIT_INITIALIZED && burst) {
|
|
int n_left = atomic_read(&rs->rs_n_left);
|
|
|
|
if (n_left <= 0)
|
|
return 0;
|
|
n_left = atomic_dec_return(&rs->rs_n_left);
|
|
if (n_left >= 0)
|
|
return 1;
|
|
}
|
|
|
|
ratelimit_state_inc_miss(rs);
|
|
return 0;
|
|
}
|
|
|
|
if (!(rs->flags & RATELIMIT_INITIALIZED)) {
|
|
rs->begin = jiffies;
|
|
rs->flags |= RATELIMIT_INITIALIZED;
|
|
atomic_set(&rs->rs_n_left, rs->burst);
|
|
}
|
|
|
|
if (time_is_before_jiffies(rs->begin + interval)) {
|
|
int m;
|
|
|
|
/*
|
|
* Reset rs_n_left ASAP to reduce false positives
|
|
* in parallel calls, see above.
|
|
*/
|
|
atomic_set(&rs->rs_n_left, rs->burst);
|
|
rs->begin = jiffies;
|
|
|
|
if (!(rs->flags & RATELIMIT_MSG_ON_RELEASE)) {
|
|
m = ratelimit_state_reset_miss(rs);
|
|
if (m) {
|
|
printk_deferred(KERN_WARNING
|
|
"%s: %d callbacks suppressed\n", func, m);
|
|
}
|
|
}
|
|
}
|
|
if (burst) {
|
|
int n_left = atomic_read(&rs->rs_n_left);
|
|
|
|
/* The burst might have been taken by a parallel call. */
|
|
|
|
if (n_left > 0) {
|
|
n_left = atomic_dec_return(&rs->rs_n_left);
|
|
if (n_left >= 0) {
|
|
ret = 1;
|
|
goto unlock_ret;
|
|
}
|
|
}
|
|
}
|
|
|
|
ratelimit_state_inc_miss(rs);
|
|
ret = 0;
|
|
|
|
unlock_ret:
|
|
raw_spin_unlock_irqrestore(&rs->lock, flags);
|
|
|
|
return ret;
|
|
}
|
|
EXPORT_SYMBOL(___ratelimit);
|