author		Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>	2019-05-04 14:46:23 +0300
committer	David S. Miller <davem@davemloft.net>	2019-05-06 07:49:24 +0300
commit		b7fe4ab8a6013c3c721bed91f73e76eec8fb5d89
tree		28988ba26e8d1226ec643cd3539e12552a47bc25
parent		8c8cfc6ed274e6fb86f00b53f3e7811afce29043
net/sched: extend matchall offload for hardware statistics
Introduce a new command for matchall classifiers that allows hardware
to update statistics.
Signed-off-by: Pieter Jansen van Vuuren <pieter.jansenvanvuuren@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--	include/net/pkt_cls.h		2
-rw-r--r--	net/sched/cls_matchall.c	20
2 files changed, 22 insertions, 0 deletions
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index 2d0470661277..161fcf8516ac 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -760,12 +760,14 @@ tc_cls_flower_offload_flow_rule(struct tc_cls_flower_offload *tc_flow_cmd)
 enum tc_matchall_command {
 	TC_CLSMATCHALL_REPLACE,
 	TC_CLSMATCHALL_DESTROY,
+	TC_CLSMATCHALL_STATS,
 };
 
 struct tc_cls_matchall_offload {
 	struct tc_cls_common_offload common;
 	enum tc_matchall_command command;
 	struct flow_rule *rule;
+	struct flow_stats stats;
 	unsigned long cookie;
 };
 
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 87bff17ac782..da916f39b719 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -321,6 +321,23 @@ static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 	return 0;
 }
 
+static void mall_stats_hw_filter(struct tcf_proto *tp,
+				 struct cls_mall_head *head,
+				 unsigned long cookie)
+{
+	struct tc_cls_matchall_offload cls_mall = {};
+	struct tcf_block *block = tp->chain->block;
+
+	tc_cls_common_offload_init(&cls_mall.common, tp, head->flags, NULL);
+	cls_mall.command = TC_CLSMATCHALL_STATS;
+	cls_mall.cookie = cookie;
+
+	tc_setup_cb_call(block, TC_SETUP_CLSMATCHALL, &cls_mall, false);
+
+	tcf_exts_stats_update(&head->exts, cls_mall.stats.bytes,
+			      cls_mall.stats.pkts, cls_mall.stats.lastused);
+}
+
 static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 		     struct sk_buff *skb, struct tcmsg *t, bool rtnl_held)
 {
@@ -332,6 +349,9 @@ static int mall_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	if (!head)
 		return skb->len;
 
+	if (!tc_skip_hw(head->flags))
+		mall_stats_hw_filter(tp, head, (unsigned long)head);
+
 	t->tcm_handle = head->handle;
 
 	nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
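For context, a driver that has bound a block callback can service the new
command by filling in the stats field of the offload struct. The sketch
below is a minimal, hypothetical handler and is not part of this commit:
the names foo_priv, foo_block_cb, foo_setup_matchall and the
foo_hw_read_*() helpers are invented placeholders for whatever register or
firmware interface a real NIC driver would use. It assumes the driver
recorded the offload cookie when it handled TC_CLSMATCHALL_REPLACE.

#include <linux/jiffies.h>
#include <net/pkt_cls.h>

/* Hypothetical per-port driver state; a real driver would keep the
 * offloaded rule (looked up via cls_mall->cookie) and its counter
 * snapshots here.
 */
struct foo_priv {
	unsigned long cookie;	/* cookie saved at TC_CLSMATCHALL_REPLACE */
	u64 last_bytes;		/* counters seen at the previous query */
	u64 last_pkts;
};

/* Placeholders for the device's counter interface. */
static u64 foo_hw_read_bytes(struct foo_priv *priv);
static u64 foo_hw_read_pkts(struct foo_priv *priv);

static int foo_setup_matchall(struct foo_priv *priv,
			      struct tc_cls_matchall_offload *cls_mall)
{
	u64 bytes, pkts;

	switch (cls_mall->command) {
	case TC_CLSMATCHALL_REPLACE:
		/* ... program the hardware rule ... */
		priv->cookie = cls_mall->cookie;
		return 0;
	case TC_CLSMATCHALL_DESTROY:
		/* ... remove the hardware rule ... */
		return 0;
	case TC_CLSMATCHALL_STATS:
		if (cls_mall->cookie != priv->cookie)
			return -ENOENT;

		/* Report deltas since the last query, since
		 * tcf_exts_stats_update() accumulates into the
		 * software counters.
		 */
		bytes = foo_hw_read_bytes(priv);
		pkts = foo_hw_read_pkts(priv);

		cls_mall->stats.bytes = bytes - priv->last_bytes;
		cls_mall->stats.pkts = pkts - priv->last_pkts;
		cls_mall->stats.lastused = jiffies;

		priv->last_bytes = bytes;
		priv->last_pkts = pkts;
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/* Block callback, registered via tcf_block_cb_register(); type_data is
 * the struct tc_cls_matchall_offload built in mall_stats_hw_filter().
 */
static int foo_block_cb(enum tc_setup_type type, void *type_data,
			void *cb_priv)
{
	struct foo_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSMATCHALL:
		return foo_setup_matchall(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

Because mall_dump() issues TC_CLSMATCHALL_STATS before dumping the filter,
the deltas a driver reports this way end up in the statistics shown by
tc -s filter show, unless the filter was created with skip_hw.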