author     Eric Dumazet <edumazet@google.com>   2026-04-27 11:36:04 +0300
committer  Jakub Kicinski <kuba@kernel.org>     2026-04-29 04:27:50 +0300
commit     276a98a434964088fccd4745db5b34d6e831e358 (patch)
tree       3fba48454fc969d0e977c23abeba8a3fbae54201
parent     91a96427b93b9ba27413077b7e825d2fefbfa134 (diff)
download   linux-276a98a434964088fccd4745db5b34d6e831e358.tar.xz
net/sched: sch_cake: annotate data-races in cake_dump_stats() (III)
cake_dump_stats() runs without the qdisc spinlock being held.

In this third patch, I add READ_ONCE()/WRITE_ONCE() annotations for the
following fields:

- packets
- tin_dropped
- tin_ecn_mark
- ack_drops
- peak_delay
- avge_delay
- base_delay

Other annotations are added in following patches, to ease code review.

Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: "Toke Høiland-Jørgensen" <toke@toke.dk>
Link: https://patch.msgid.link/20260427083606.459355-4-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
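The snippet below is a minimal, self-contained sketch of the READ_ONCE()/WRITE_ONCE() pairing this patch applies, not part of the commit itself; the identifiers tin_stats, tin_stat_inc and tin_stat_read are hypothetical stand-ins rather than CAKE's real names. The writer side still runs under the qdisc lock; the annotations exist so the lockless dump path can read the counters without load/store tearing.

#include <linux/compiler.h>	/* READ_ONCE()/WRITE_ONCE() */
#include <linux/types.h>	/* u32 */

/* Hypothetical counter, standing in for fields such as b->packets. */
struct tin_stats {
	u32 packets;
};

/* Writer: enqueue/dequeue paths, serialized by the qdisc lock,
 * but racing with a lockless reader in the dump path.
 */
static inline void tin_stat_inc(struct tin_stats *s)
{
	WRITE_ONCE(s->packets, s->packets + 1);
}

/* Reader: dump path, called without the qdisc lock held. */
static inline u32 tin_stat_read(const struct tin_stats *s)
{
	return READ_ONCE(s->packets);
}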
-rw-r--r--   net/sched/sch_cake.c   38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index d7465ee4c550..c5aae31565e9 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1600,7 +1600,7 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
sch->qstats.backlog -= len;
flow->dropped++;
- b->tin_dropped++;
+ WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1);
if (q->config->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
@@ -1820,7 +1820,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
numsegs++;
slen += segs->len;
q->buffer_used += segs->truesize;
- b->packets++;
+ WRITE_ONCE(b->packets, b->packets + 1);
}
/* stats */
@@ -1844,7 +1844,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
ack = cake_ack_filter(q, flow);
if (ack) {
- b->ack_drops++;
+ WRITE_ONCE(b->ack_drops, b->ack_drops + 1);
sch->qstats.drops++;
ack_pkt_len = qdisc_pkt_len(ack);
b->bytes += ack_pkt_len;
@@ -1860,7 +1860,7 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
/* stats */
- b->packets++;
+ WRITE_ONCE(b->packets, b->packets + 1);
b->bytes += len - ack_pkt_len;
b->backlogs[idx] += len - ack_pkt_len;
b->tin_backlog += len - ack_pkt_len;
@@ -2236,7 +2236,7 @@ retry:
b->tin_deficit -= len;
}
flow->dropped++;
- b->tin_dropped++;
+ WRITE_ONCE(b->tin_dropped, b->tin_dropped + 1);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
qdisc_dequeue_drop(sch, skb, reason);
@@ -2244,17 +2244,19 @@ retry:
goto retry;
}
- b->tin_ecn_mark += !!flow->cvars.ecn_marked;
+ WRITE_ONCE(b->tin_ecn_mark, b->tin_ecn_mark + !!flow->cvars.ecn_marked);
qdisc_bstats_update(sch, skb);
WRITE_ONCE(q->last_active, now);
/* collect delay stats */
delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
- b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
- b->peak_delay = cake_ewma(b->peak_delay, delay,
- delay > b->peak_delay ? 2 : 8);
- b->base_delay = cake_ewma(b->base_delay, delay,
- delay < b->base_delay ? 2 : 8);
+ WRITE_ONCE(b->avge_delay, cake_ewma(b->avge_delay, delay, 8));
+ WRITE_ONCE(b->peak_delay,
+ cake_ewma(b->peak_delay, delay,
+ delay > b->peak_delay ? 2 : 8));
+ WRITE_ONCE(b->base_delay,
+ cake_ewma(b->base_delay, delay,
+ delay < b->base_delay ? 2 : 8));
len = cake_advance_shaper(q, b, skb, now, false);
flow->deficit -= len;
@@ -3042,17 +3044,17 @@ static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
PUT_TSTAT_U32(INTERVAL_US,
ktime_to_us(ns_to_ktime(b->cparams.interval)));
- PUT_TSTAT_U32(SENT_PACKETS, b->packets);
- PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
- PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
- PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
+ PUT_TSTAT_U32(SENT_PACKETS, READ_ONCE(b->packets));
+ PUT_TSTAT_U32(DROPPED_PACKETS, READ_ONCE(b->tin_dropped));
+ PUT_TSTAT_U32(ECN_MARKED_PACKETS, READ_ONCE(b->tin_ecn_mark));
+ PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, READ_ONCE(b->ack_drops));
PUT_TSTAT_U32(PEAK_DELAY_US,
- ktime_to_us(ns_to_ktime(b->peak_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->peak_delay))));
PUT_TSTAT_U32(AVG_DELAY_US,
- ktime_to_us(ns_to_ktime(b->avge_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->avge_delay))));
PUT_TSTAT_U32(BASE_DELAY_US,
- ktime_to_us(ns_to_ktime(b->base_delay)));
+ ktime_to_us(ns_to_ktime(READ_ONCE(b->base_delay))));
PUT_TSTAT_U32(WAY_INDIRECT_HITS, READ_ONCE(b->way_hits));
PUT_TSTAT_U32(WAY_MISSES, READ_ONCE(b->way_misses));