author	Eric Dumazet <edumazet@google.com>	2025-11-21 11:32:51 +0300
committer	Paolo Abeni <pabeni@redhat.com>	2025-11-25 18:10:32 +0300
commit	2f9babc04d74cbf984f0cb5b6e20bd78fdf32997 (patch)
tree	e1a94051f8deef64670cf7d5c5b8f072a6d01db4
parent	3c1100f042c006cae6c241028cc4c69e1a70483f (diff)
download	linux-2f9babc04d74cbf984f0cb5b6e20bd78fdf32997.tar.xz
net_sched: sch_fq: prefetch one skb ahead in dequeue()
Prefetch the skb that we are likely to dequeue at the next dequeue().

Also call fq_dequeue_skb() a bit sooner in fq_dequeue(). This reduces
the window between the read of q.qlen and the changes to fields in the
cache line that could be dirtied by another cpu trying to queue a packet.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Link: https://patch.msgid.link/20251121083256.674562-10-edumazet@google.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
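[Editor's note: for readers unfamiliar with the pattern, here is a minimal
userspace sketch of the same prefetch-one-ahead idea on a singly linked
queue. The struct node, head, and dequeue_one() names are hypothetical,
and __builtin_prefetch() stands in for the kernel's prefetch() helper.]

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical queue node; stands in for sk_buff. */
struct node {
	struct node *next;
	int payload;
};

/* Pop the head and prefetch the node we will likely pop next, mirroring
 * the fq_erase_head() change: the cache miss for the *next* dequeue is
 * overlapped with the caller's work on the current node. */
static struct node *dequeue_one(struct node **headp)
{
	struct node *head = *headp;
	struct node *next;

	if (!head)
		return NULL;
	next = head->next;
	__builtin_prefetch(next);	/* the kernel code uses prefetch(next) */
	*headp = next;
	return head;
}

int main(void)
{
	struct node *head = NULL, **tail = &head, *n;
	int i;

	/* Build a small queue, then drain it. */
	for (i = 0; i < 4; i++) {
		n = malloc(sizeof(*n));
		n->next = NULL;
		n->payload = i;
		*tail = n;
		tail = &n->next;
	}
	while ((n = dequeue_one(&head)) != NULL) {
		printf("%d\n", n->payload);
		free(n);
	}
	return 0;
}

[Issuing the prefetch while the caller still has work to do on the
current node lets the memory latency of fetching the next node overlap
with that work instead of stalling the following dequeue.]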
-rw-r--r--	net/sched/sch_fq.c	7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 0b0ca1aa9251..6e5f2f4f2415 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -480,7 +480,10 @@ static void fq_erase_head(struct Qdisc *sch, struct fq_flow *flow,
 			  struct sk_buff *skb)
 {
 	if (skb == flow->head) {
-		flow->head = skb->next;
+		struct sk_buff *next = skb->next;
+
+		prefetch(next);
+		flow->head = next;
 	} else {
 		rb_erase(&skb->rbnode, &flow->t_root);
 		skb->dev = qdisc_dev(sch);
@@ -712,6 +715,7 @@ begin:
 			goto begin;
 		}
 		prefetch(&skb->end);
+		fq_dequeue_skb(sch, f, skb);
 		if ((s64)(now - time_next_packet - q->ce_threshold) > 0) {
 			INET_ECN_set_ce(skb);
 			q->stat_ce_mark++;
@@ -719,7 +723,6 @@ begin:
 		if (--f->qlen == 0)
 			q->inactive_flows++;
 		q->band_pkt_count[fq_skb_cb(skb)->band]--;
-		fq_dequeue_skb(sch, f, skb);
 	} else {
 		head->first = f->next;
 		/* force a pass through old_flows to prevent starvation */
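[Editor's note: the second pair of hunks is easiest to read as a
reordering. Below is a simplified sketch of that reordering, not the
real sch_fq function bodies; the three helpers are hypothetical
stand-ins for the CE-threshold check, the per-flow bookkeeping, and the
qdisc-level accounting in fq_dequeue_skb() that writes to the cache
line holding sch->q.qlen.]

#include <stdio.h>

/* Hypothetical stand-ins, not sch_fq symbols. */
static void qdisc_accounting(void)   { puts("write sch->q.qlen cache line"); }
static void ce_threshold_check(void) { puts("CE threshold check"); }
static void flow_bookkeeping(void)   { puts("f->qlen / band counters"); }

/* Before the patch: the qdisc-level write comes last, leaving a wide
 * window after the earlier read of q.qlen in which another cpu
 * enqueueing a packet can dirty the same cache line. */
static void dequeue_before(void)
{
	ce_threshold_check();
	flow_bookkeeping();
	qdisc_accounting();
}

/* After the patch: fq_dequeue_skb() runs right after prefetch(&skb->end),
 * so the read-to-write window on that cache line is as short as
 * possible. */
static void dequeue_after(void)
{
	qdisc_accounting();
	ce_threshold_check();
	flow_bookkeeping();
}

int main(void)
{
	puts("before:");
	dequeue_before();
	puts("after:");
	dequeue_after();
	return 0;
}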