author | John Fastabend <john.fastabend@gmail.com> | 2014-09-28 22:52:56 +0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2014-09-30 09:02:26 +0400 |
commit | 22e0f8b9322cb1a48b1357e8f4ae6f5a9eca8cfa (patch) | |
tree | 2c9ef18dca9d9a441d92ea57cf7f7a292f4ceb3f /include/net/sch_generic.h | |
parent | 79cf79abce71eb7dbc40e2f3121048ca5405cb47 (diff) | |
download | linux-22e0f8b9322cb1a48b1357e8f4ae6f5a9eca8cfa.tar.xz |
net: sched: make bstats per cpu and estimator RCU safe
In order to run qdiscs without locking, statistics and estimators
need to be handled correctly.
To resolve this for bstats, make the statistics per-CPU. Because
per-CPU counters are only needed for qdiscs that run without locks,
which will not be the case for most qdiscs in the near future, only
create the percpu stats when a qdisc sets the TCQ_F_CPUSTATS flag.
Next, because estimators use the bstats to calculate packets per
second and bytes per second, the estimator code paths are updated
to use the per-CPU statistics.
Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
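For context, a minimal sketch (not part of this patch) of how a lockless qdisc could opt in to the new counters: set TCQ_F_CPUSTATS, allocate `cpu_bstats`, and call the `qdisc_bstats_update_cpu()` helper added below instead of `qdisc_bstats_update()`. The callback names and the allocation site here are illustrative assumptions; `struct gnet_stats_basic_cpu` comes from the parts of this commit outside this file (include/net/gen_stats.h), and the rest of the series wires the allocation into the core qdisc setup.

```c
/* Hedged sketch, not from this patch: a qdisc that opts in to per-CPU
 * byte/packet counters. Callback names and allocation site are
 * illustrative only.
 */
#include <linux/percpu.h>
#include <net/gen_stats.h>
#include <net/sch_generic.h>

static int example_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
	sch->flags |= TCQ_F_CPUSTATS;	/* run using per-CPU statistics */

	sch->cpu_bstats = alloc_percpu(struct gnet_stats_basic_cpu);
	if (!sch->cpu_bstats)
		return -ENOMEM;
	return 0;
}

static int example_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* No qdisc lock held: bump only this CPU's counters. */
	qdisc_bstats_update_cpu(sch, skb);

	/* ... enqueue skb on the qdisc's own lock-free queue ... */
	return NET_XMIT_SUCCESS;
}
```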
Diffstat (limited to 'include/net/sch_generic.h')
-rw-r--r-- | include/net/sch_generic.h | 22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index e65b8e0752af..4b9351120fd8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -6,6 +6,7 @@
 #include <linux/rcupdate.h>
 #include <linux/pkt_sched.h>
 #include <linux/pkt_cls.h>
+#include <linux/percpu.h>
 #include <net/gen_stats.h>
 #include <net/rtnetlink.h>
@@ -58,6 +59,7 @@ struct Qdisc {
 				      * multiqueue device.
 				      */
 #define TCQ_F_WARN_NONWC	(1 << 16)
+#define TCQ_F_CPUSTATS		0x20 /* run using percpu statistics */
 	u32			limit;
 	const struct Qdisc_ops	*ops;
 	struct qdisc_size_table	__rcu *stab;
@@ -83,7 +85,10 @@ struct Qdisc {
 	 */
 	unsigned long		state;
 	struct sk_buff_head	q;
-	struct gnet_stats_basic_packed bstats;
+	union {
+		struct gnet_stats_basic_packed bstats;
+		struct gnet_stats_basic_cpu __percpu *cpu_bstats;
+	} __packed;
 	unsigned int		__state;
 	struct gnet_stats_queue	qstats;
 	struct rcu_head		rcu_head;
@@ -487,6 +492,10 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
+static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
+{
+	return q->flags & TCQ_F_CPUSTATS;
+}
 static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
 				 const struct sk_buff *skb)
@@ -495,6 +504,17 @@ static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
 	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
 }
+static inline void qdisc_bstats_update_cpu(struct Qdisc *sch,
+					   const struct sk_buff *skb)
+{
+	struct gnet_stats_basic_cpu *bstats =
+				this_cpu_ptr(sch->cpu_bstats);
+
+	u64_stats_update_begin(&bstats->syncp);
+	bstats_update(&bstats->bstats, skb);
+	u64_stats_update_end(&bstats->syncp);
+}
+
 static inline void qdisc_bstats_update(struct Qdisc *sch,
 				       const struct sk_buff *skb)
 {
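Because bstats now sit in a union, a reader must check qdisc_is_percpu_stats() and, for per-CPU qdiscs, fold each CPU's counters into one total under the u64_stats seqcount. The sketch below shows that accumulation; the helper name `example_sum_cpu_bstats` is hypothetical, and the equivalent accumulation for this commit lives outside this filtered diff (net/core/gen_stats.c), where the estimator path picks it up.

```c
/* Hedged sketch: fold per-CPU byte/packet counters into a single
 * gnet_stats_basic_packed total. The helper name is hypothetical;
 * the real accumulation for this series is in net/core/gen_stats.c.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>
#include <net/gen_stats.h>

static void example_sum_cpu_bstats(struct gnet_stats_basic_packed *sum,
				   struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes;
		u32 packets;

		/* Retry if a writer on that CPU was mid-update. */
		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		sum->bytes += bytes;
		sum->packets += packets;
	}
}
```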