summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author: Eric Dumazet <edumazet@google.com> 2026-04-23 09:28:39 +0300
committer: Jakub Kicinski <kuba@kernel.org> 2026-04-28 03:41:08 +0300
commitd3aeb889dcbd78e95f500d383799a23d949796e0 (patch)
treeee12f5553564bf9fbf11a127a27942f98472e6d0
parentbde34e84edc8b5571fbde7e941e175a4293ee1eb (diff)
downloadlinux-d3aeb889dcbd78e95f500d383799a23d949796e0.tar.xz
net/sched: sch_choke: annotate data-races in choke_dump_stats()
choke_dump_stats() only runs with RTNL held. It reads fields that can be changed in the qdisc fast path. Add READ_ONCE()/WRITE_ONCE() annotations.

Fixes: edb09eb17ed8 ("net: sched: do not acquire qdisc spinlock in qdisc/class stats dump")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Jamal Hadi Salim <jhs@mojatatu.com>
Link: https://patch.msgid.link/20260423062839.2524324-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
-rw-r--r--net/sched/sch_choke.c26
1 file changed, 16 insertions, 10 deletions
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 94df8e741a97..2875bcdb18a4 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -229,7 +229,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Draw a packet at random from queue and compare flow */
if (choke_match_random(q, skb, &idx)) {
- q->stats.matched++;
+ WRITE_ONCE(q->stats.matched, q->stats.matched + 1);
choke_drop_by_idx(sch, idx, to_free);
goto congestion_drop;
}
@@ -241,11 +241,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
qdisc_qstats_overlimit(sch);
if (use_harddrop(q) || !use_ecn(q) ||
!INET_ECN_set_ce(skb)) {
- q->stats.forced_drop++;
+ WRITE_ONCE(q->stats.forced_drop,
+ q->stats.forced_drop + 1);
goto congestion_drop;
}
- q->stats.forced_mark++;
+ WRITE_ONCE(q->stats.forced_mark,
+ q->stats.forced_mark + 1);
} else if (++q->vars.qcount) {
if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
q->vars.qcount = 0;
@@ -253,11 +255,13 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
qdisc_qstats_overlimit(sch);
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
- q->stats.prob_drop++;
+ WRITE_ONCE(q->stats.prob_drop,
+ q->stats.prob_drop + 1);
goto congestion_drop;
}
- q->stats.prob_mark++;
+ WRITE_ONCE(q->stats.prob_mark,
+ q->stats.prob_mark + 1);
}
} else
q->vars.qR = red_random(p);
@@ -272,7 +276,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_SUCCESS;
}
- q->stats.pdrop++;
+ WRITE_ONCE(q->stats.pdrop, q->stats.pdrop + 1);
return qdisc_drop(skb, sch, to_free);
congestion_drop:
@@ -461,10 +465,12 @@ static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
struct choke_sched_data *q = qdisc_priv(sch);
struct tc_choke_xstats st = {
- .early = q->stats.prob_drop + q->stats.forced_drop,
- .marked = q->stats.prob_mark + q->stats.forced_mark,
- .pdrop = q->stats.pdrop,
- .matched = q->stats.matched,
+ .early = READ_ONCE(q->stats.prob_drop) +
+ READ_ONCE(q->stats.forced_drop),
+ .marked = READ_ONCE(q->stats.prob_mark) +
+ READ_ONCE(q->stats.forced_mark),
+ .pdrop = READ_ONCE(q->stats.pdrop),
+ .matched = READ_ONCE(q->stats.matched),
};
return gnet_stats_copy_app(d, &st, sizeof(st));