diff options
author | Davide Caratti <dcaratti@redhat.com> | 2018-09-13 20:29:12 +0300 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-09-17 01:30:22 +0300 |
commit | 93be42f9173bbc133bd72915b658394308437d00 (patch) | |
tree | 0b4577ae42b508b99ed8da35f8df393499fefdc8 /net/sched | |
parent | c3ec8bcceb07ab81e4ff017b4ebbacc137a5a15e (diff) | |
download | linux-93be42f9173bbc133bd72915b658394308437d00.tar.xz |
net/sched: act_police: use per-cpu counters
Use per-CPU counters instead of sharing a single set of stats with all
cores. This removes the need to use a spinlock when statistics are read
or updated.
Signed-off-by: Davide Caratti <dcaratti@redhat.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/act_police.c | 46 |
1 file changed, 22 insertions, 24 deletions
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 393c7a670300..965a48d3ec35 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -110,7 +110,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, NULL, a,
-				     &act_police_ops, bind, false);
+				     &act_police_ops, bind, true);
 		if (ret) {
 			tcf_idr_cleanup(tn, parm->index);
 			return ret;
 		}
@@ -137,7 +137,8 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
 	}
 
 	if (est) {
-		err = gen_replace_estimator(&police->tcf_bstats, NULL,
+		err = gen_replace_estimator(&police->tcf_bstats,
+					    police->common.cpu_bstats,
 					    &police->tcf_rate_est,
 					    &police->tcf_lock,
 					    NULL, est);
@@ -207,32 +208,27 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
 			  struct tcf_result *res)
 {
 	struct tcf_police *police = to_police(a);
-	s64 now;
-	s64 toks;
-	s64 ptoks = 0;
+	s64 now, toks, ptoks = 0;
+	int ret;
 
-	spin_lock(&police->tcf_lock);
-
-	bstats_update(&police->tcf_bstats, skb);
 	tcf_lastuse_update(&police->tcf_tm);
+	bstats_cpu_update(this_cpu_ptr(police->common.cpu_bstats), skb);
 
+	spin_lock(&police->tcf_lock);
 	if (police->tcfp_ewma_rate) {
 		struct gnet_stats_rate_est64 sample;
 
 		if (!gen_estimator_read(&police->tcf_rate_est, &sample) ||
 		    sample.bps >= police->tcfp_ewma_rate) {
-			police->tcf_qstats.overlimits++;
-			if (police->tcf_action == TC_ACT_SHOT)
-				police->tcf_qstats.drops++;
-			spin_unlock(&police->tcf_lock);
-			return police->tcf_action;
+			ret = police->tcf_action;
+			goto inc_overlimits;
 		}
 	}
 
 	if (qdisc_pkt_len(skb) <= police->tcfp_mtu) {
 		if (!police->rate_present) {
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
+			ret = police->tcfp_result;
+			goto unlock;
 		}
 
 		now = ktime_get_ns();
@@ -253,18 +249,20 @@ static int tcf_police_act(struct sk_buff *skb, const struct tc_action *a,
 			police->tcfp_t_c = now;
 			police->tcfp_toks = toks;
 			police->tcfp_ptoks = ptoks;
-			if (police->tcfp_result == TC_ACT_SHOT)
-				police->tcf_qstats.drops++;
-			spin_unlock(&police->tcf_lock);
-			return police->tcfp_result;
+			ret = police->tcfp_result;
+			goto inc_drops;
 		}
 	}
-
-	police->tcf_qstats.overlimits++;
-	if (police->tcf_action == TC_ACT_SHOT)
-		police->tcf_qstats.drops++;
+	ret = police->tcf_action;
+
+inc_overlimits:
+	qstats_overlimit_inc(this_cpu_ptr(police->common.cpu_qstats));
+inc_drops:
+	if (ret == TC_ACT_SHOT)
+		qstats_drop_inc(this_cpu_ptr(police->common.cpu_qstats));
+unlock:
 	spin_unlock(&police->tcf_lock);
-	return police->tcf_action;
+	return ret;
 }
 
 static int tcf_police_dump(struct sk_buff *skb, struct tc_action *a,