author    David S. Miller <davem@davemloft.net>  2016-06-08 02:37:14 +0300
committer David S. Miller <davem@davemloft.net>  2016-06-08 02:37:14 +0300
commit    34fe76abbea5174e532681e420fb31139909efb4 (patch)
tree      1f241506d6b781b65d1033925f1c1ce6a39c3394 /include
parent    64151ae36ed93c45654069c8aff2a7f0125075e8 (diff)
parent    edb09eb17ed89eaa82a52dd306beac93e292b485 (diff)
download  linux-34fe76abbea5174e532681e420fb31139909efb4.tar.xz
Merge branch 'net-sched-fast-stats'
Eric Dumazet says:

====================
net: sched: faster stats gathering

A while back, I sent one RFC patch using lockless stats gathering on
64bit arches.

This patch series does it more cleanly, using a seqcount.

Since qdisc/class stats are written at dequeue() time, we can ask the
dequeue to change the seqcount, so that stats readers can avoid taking
the root qdisc lock and instead use the typical
read_seqcount_{begin|retry} guarded loop.

This does not change fast path costs, as the seqcount increments are
not more expensive than the bit manipulation, and it allows readers to
no longer freeze the fast path.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
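As a rough illustration of the reader-side pattern described above, a stats
reader could snapshot a qdisc's counters without taking the root qdisc lock
along these lines; the helper name snapshot_bstats() is made up for this
sketch and is not code from the series:

    #include <linux/seqlock.h>
    #include <net/gen_stats.h>
    #include <net/sch_generic.h>

    static void snapshot_bstats(const struct Qdisc *qdisc,
                                struct gnet_stats_basic_packed *snap)
    {
            unsigned int seq;

            do {
                    /* read_seqcount_begin() waits out any dequeue in progress */
                    seq = read_seqcount_begin(&qdisc->running);
                    snap->bytes   = qdisc->bstats.bytes;
                    snap->packets = qdisc->bstats.packets;
                    /* retry if a dequeue bumped the seqcount while we copied */
            } while (read_seqcount_retry(&qdisc->running, seq));
    }

In the series itself the stats code is handed the root qdisc's seqcount via
the qdisc_root_sleeping_running() helper added below, rather than reading a
leaf qdisc directly as this sketch does.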
Diffstat (limited to 'include')
-rw-r--r--  include/linux/netdevice.h  |  1
-rw-r--r--  include/net/gen_stats.h    | 12
-rw-r--r--  include/net/sch_generic.h  | 23
3 files changed, 21 insertions, 15 deletions
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index fa6df2699532..59d7e06d88d5 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1862,6 +1862,7 @@ struct net_device {
#endif
struct phy_device *phydev;
struct lock_class_key *qdisc_tx_busylock;
+ struct lock_class_key *qdisc_running_key;
bool proto_down;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 610cd397890e..231e121cc7d9 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -33,10 +33,12 @@ int gnet_stats_start_copy_compat(struct sk_buff *skb, int type,
spinlock_t *lock, struct gnet_dump *d,
int padattr);
-int gnet_stats_copy_basic(struct gnet_dump *d,
+int gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_dump *d,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
-void __gnet_stats_copy_basic(struct gnet_stats_basic_packed *bstats,
+void __gnet_stats_copy_basic(const seqcount_t *running,
+ struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu,
struct gnet_stats_basic_packed *b);
int gnet_stats_copy_rate_est(struct gnet_dump *d,
@@ -52,13 +54,15 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
+ spinlock_t *stats_lock,
+ seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_rate_est64 *rate_est);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct gnet_stats_rate_est64 *rate_est,
- spinlock_t *stats_lock, struct nlattr *opt);
+ spinlock_t *stats_lock,
+ seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,
const struct gnet_stats_rate_est64 *rate_est);
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a1fd76c22a59..c4f5749342ec 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -29,13 +29,6 @@ enum qdisc_state_t {
__QDISC_STATE_THROTTLED,
};
-/*
- * following bits are only changed while qdisc lock is held
- */
-enum qdisc___state_t {
- __QDISC___STATE_RUNNING = 1,
-};
-
struct qdisc_size_table {
struct rcu_head rcu;
struct list_head list;
@@ -93,7 +86,7 @@ struct Qdisc {
unsigned long state;
struct sk_buff_head q;
struct gnet_stats_basic_packed bstats;
- unsigned int __state;
+ seqcount_t running;
struct gnet_stats_queue qstats;
struct rcu_head rcu_head;
int padded;
@@ -104,20 +97,20 @@ struct Qdisc {
static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
- return (qdisc->__state & __QDISC___STATE_RUNNING) ? true : false;
+ return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
if (qdisc_is_running(qdisc))
return false;
- qdisc->__state |= __QDISC___STATE_RUNNING;
+ write_seqcount_begin(&qdisc->running);
return true;
}
static inline void qdisc_run_end(struct Qdisc *qdisc)
{
- qdisc->__state &= ~__QDISC___STATE_RUNNING;
+ write_seqcount_end(&qdisc->running);
}
static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
@@ -321,6 +314,14 @@ static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
return qdisc_lock(root);
}
+static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
+{
+ struct Qdisc *root = qdisc_root_sleeping(qdisc);
+
+ ASSERT_RTNL();
+ return &root->running;
+}
+
static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
return qdisc->dev_queue->dev;
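
For context on the sch_generic.h changes above, the dequeue path is what
drives the new seqcount: qdisc_run_begin()/qdisc_run_end() now wrap
write_seqcount_begin()/write_seqcount_end() on qdisc->running, so the
"is running" indication and the stats-consistency window coincide. A
simplified stand-in for the real qdisc_run() helper (illustration only,
not the kernel's code) might look like:

    #include <net/sch_generic.h>

    static void example_qdisc_run(struct Qdisc *q)
    {
            /* false means another CPU already owns the running seqcount */
            if (!qdisc_run_begin(q))
                    return;

            /*
             * Dequeue and transmit packets here; bstats/qstats updates made
             * in this window are covered by an odd sequence value, so
             * lockless readers retry instead of seeing torn counters.
             */

            qdisc_run_end(q);
    }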