path: root/include/net/sch_generic.h
author		David S. Miller <davem@davemloft.net>	2019-04-02 00:50:14 +0300
committer	David S. Miller <davem@davemloft.net>	2019-04-02 00:50:14 +0300
commit		c4df1bddc4306a08bc8ec6a65d34386388d2f42f (patch)
tree		394e064316a0965588c3784a36a24fca3a7b352d /include/net/sch_generic.h
parent		0db6f8befc32c68bb13d7ffbb2e563c79e913e13 (diff)
parent		e5f0e8f8e456589d56e4955154ed5d468cd6d286 (diff)
download	linux-c4df1bddc4306a08bc8ec6a65d34386388d2f42f.tar.xz
Merge branch 'net-sched-fix-stats-accounting-for-child-NOLOCK-qdiscs'
Paolo Abeni says:

====================
net: sched: fix stats accounting for child NOLOCK qdiscs

Currently, stats accounting for a NOLOCK qdisc enslaved to a classful (locked) qdisc is buggy. Per-CPU values are ignored in most places; as a result, a stats dump in the above scenario always reports a 0-length backlog, and the parent backlog length is not updated correctly on NOLOCK qdisc removal.

The first patch addresses stats dumping, and the second one addresses child qdisc removal. I'm targeting the net tree as this is a bugfix, but it could be moved to net-next due to the relatively large diffstat.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
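For context, the core of the bug is that a NOLOCK qdisc keeps its queue length in per-CPU counters rather than in a single counter, so any caller that reads only the non-per-CPU field sees 0. The snippet below is a minimal, self-contained userspace sketch of that aggregation step; it roughly mirrors the idea behind qdisc_qlen_sum() used by the new helpers, but the struct, field, and function names here are simplified stand-ins, not the kernel's definitions.

/* Userspace sketch of per-CPU queue-length aggregation; all names below
 * are illustrative stand-ins, not kernel code. */
#include <stdio.h>

#define NR_CPUS 4

struct fake_qdisc {
        int nolock;                     /* set for NOLOCK (lockless) qdiscs */
        unsigned int qlen;              /* single counter, used when !nolock */
        unsigned int cpu_qlen[NR_CPUS]; /* per-CPU counters, used when nolock */
};

/* Sum the per-CPU counters for a NOLOCK qdisc instead of reading the
 * single counter, which stays at 0 in that case. */
static unsigned int fake_qlen_sum(const struct fake_qdisc *q)
{
        unsigned int len = 0;
        int cpu;

        if (!q->nolock)
                return q->qlen;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                len += q->cpu_qlen[cpu];
        return len;
}

int main(void)
{
        struct fake_qdisc q = { .nolock = 1, .cpu_qlen = { 3, 0, 5, 1 } };

        /* Reading q.qlen directly would report 0 for this NOLOCK qdisc;
         * aggregating the per-CPU counters reports the real length (9). */
        printf("qlen = %u\n", fake_qlen_sum(&q));
        return 0;
}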
Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r--	include/net/sch_generic.h	44
1 file changed, 37 insertions, 7 deletions
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 7d1a0483a17b..a2b38b3deeca 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
 	sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+	__u32 qlen = qdisc_qlen_sum(sch);
+
+	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+					     __u32 *backlog)
+{
+	struct gnet_stats_queue qstats = { 0 };
+	__u32 len = qdisc_qlen_sum(sch);
+
+	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+	*qlen = qstats.qlen;
+	*backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_reset(sch);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
 	qh->head = NULL;
@@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
 	sch_tree_lock(sch);
 	old = *pold;
 	*pold = new;
-	if (old != NULL) {
-		unsigned int qlen = old->q.qlen;
-		unsigned int backlog = old->qstats.backlog;
-
-		qdisc_reset(old);
-		qdisc_tree_reduce_backlog(old, qlen, backlog);
-	}
+	if (old != NULL)
+		qdisc_tree_flush_backlog(old);
 	sch_tree_unlock(sch);
 
 	return old;
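A note on the ordering in the new qdisc_purge_queue() helper above: the qlen/backlog snapshot has to be taken before qdisc_reset() clears the counters, and only then propagated up the tree, otherwise the parent would be reduced by zero and keep a stale backlog. The snippet below is a minimal, self-contained userspace sketch of that ordering; the struct and helper names are illustrative stand-ins, not the kernel's.

/* Userspace sketch of the snapshot-reset-reduce ordering used by
 * qdisc_purge_queue(); all names below are illustrative, not kernel code. */
#include <stdio.h>

struct fake_qdisc {
        unsigned int qlen;
        unsigned int backlog;
};

static void fake_reset(struct fake_qdisc *q)
{
        q->qlen = 0;
        q->backlog = 0;
}

/* Stand-in for qdisc_tree_reduce_backlog(): tell the parent how much to
 * subtract from its own counters. */
static void fake_tree_reduce(struct fake_qdisc *parent,
                             unsigned int qlen, unsigned int backlog)
{
        parent->qlen -= qlen;
        parent->backlog -= backlog;
}

int main(void)
{
        struct fake_qdisc parent = { .qlen = 10, .backlog = 6400 };
        struct fake_qdisc child  = { .qlen = 10, .backlog = 6400 };

        /* Same ordering as qdisc_purge_queue(): snapshot, reset, reduce.
         * Resetting first would snapshot zeros and leave the parent's
         * counters inflated. */
        unsigned int qlen = child.qlen;
        unsigned int backlog = child.backlog;

        fake_reset(&child);
        fake_tree_reduce(&parent, qlen, backlog);

        printf("parent qlen=%u backlog=%u\n", parent.qlen, parent.backlog);
        return 0;
}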