summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorPaul E. McKenney <paulmck@kernel.org>2025-04-15 01:43:47 +0300
committerPaul E. McKenney <paulmck@kernel.org>2025-05-09 02:13:27 +0300
commit123a1d97b2baf9eba9662a5f65660edc317e0bb8 (patch)
treee0305fd65834bfcfe7df5464406a378f529c1f4b /lib
parent21ac6e5edac569df3938b136471c59c1d3d01f09 (diff)
downloadlinux-123a1d97b2baf9eba9662a5f65660edc317e0bb8.tar.xz
ratelimit: Avoid atomic decrement if already rate-limited
Currently, if the lock could not be acquired, the code unconditionally does an atomic decrement on ->rs_n_left, even if that atomic operation is guaranteed to return a limit-rate verdict. This incurs needless overhead and also raises the spectre of counter wrap. Therefore, do the atomic decrement only if there is some chance that rates won't be limited. Link: https://lore.kernel.org/all/fbe93a52-365e-47fe-93a4-44a44547d601@paulmck-laptop/ Link: https://lore.kernel.org/all/20250423115409.3425-1-spasswolf@web.de/ Signed-off-by: Paul E. McKenney <paulmck@kernel.org> Reviewed-by: Petr Mladek <pmladek@suse.com> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Kuniyuki Iwashima <kuniyu@amazon.com> Cc: Mateusz Guzik <mjguzik@gmail.com> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: John Ogness <john.ogness@linutronix.de> Cc: Sergey Senozhatsky <senozhatsky@chromium.org>
Diffstat (limited to 'lib')
-rw-r--r--lib/ratelimit.c4
1 file changed, 3 insertions, 1 deletion
diff --git a/lib/ratelimit.c b/lib/ratelimit.c
index 4e520d029d28..a7aaebb7a718 100644
--- a/lib/ratelimit.c
+++ b/lib/ratelimit.c
@@ -65,8 +65,10 @@ int ___ratelimit(struct ratelimit_state *rs, const char *func)
unsigned int rs_flags = READ_ONCE(rs->flags);
if (rs_flags & RATELIMIT_INITIALIZED && burst) {
- int n_left;
+ int n_left = atomic_read(&rs->rs_n_left);
+ if (n_left <= 0)
+ return 0;
n_left = atomic_dec_return(&rs->rs_n_left);
if (n_left >= 0)
return 1;