| author | David S. Miller <davem@davemloft.net> | 2018-06-26 17:21:33 +0300 |
| committer | David S. Miller <davem@davemloft.net> | 2018-06-26 17:21:33 +0300 |
| commit | a6e65e5d2ca4b1d3ff4ac03e056f82b6854ed50e (patch) |
| tree | c0a910ed8cf411968ae1dc86ec9423a7a0c80e4f /include |
| parent | 572de270f0a38c6d6f329d494d3bf624e05f933b (diff) |
| parent | 326367427cc09d38e4c1d145131ee2e228ac94c5 (diff) |
| download | linux-a6e65e5d2ca4b1d3ff4ac03e056f82b6854ed50e.tar.xz |
Merge branch 'net-sched-support-replay-of-filter-offload-when-binding-to-block'
Jakub Kicinski says:
====================
net: sched: support replay of filter offload when binding to block
This series from John adds the ability to replay filter offload requests
when a new offload callback is being registered on a TC block. Today this
is most likely to happen for shared blocks, when a block which already
has rules is bound to another interface. Prior to this patch set, if any
of the rules were offloaded, the block bind would fail.
A new tcf_proto_op is added to generate a filter-specific offload request.
The new 'reoffload' op supports extack from day 0, hence we need to
propagate extack to .ndo_setup_tc TC_BLOCK_BIND/TC_BLOCK_UNBIND and
through tcf_block_cb_register() to tcf_block_playback_offloads().
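
As a rough illustration only (the replay helper lives in net/sched/cls_api.c
and is not part of the 'include' diffstat below), the core loop could look
roughly like this, assuming the chain/filter list layout from sch_generic.h;
the signature, the offload_in_use flag and the error handling shown here are
approximations, not the exact code from the series:

static int
tcf_block_playback_offloads(struct tcf_block *block, tc_setup_cb_t *cb,
                            void *cb_priv, bool add, bool offload_in_use,
                            struct netlink_ext_ack *extack)
{
        struct tcf_chain *chain;
        struct tcf_proto *tp;
        int err;

        /* walk every classifier instance on the block and ask it to
         * regenerate its offload requests towards this one callback
         */
        list_for_each_entry(chain, &block->chain_list, list) {
                for (tp = rtnl_dereference(chain->filter_chain); tp;
                     tp = rtnl_dereference(tp->next)) {
                        if (tp->ops->reoffload) {
                                err = tp->ops->reoffload(tp, add, cb, cb_priv,
                                                         extack);
                                if (err && add)
                                        return err; /* caller may roll back */
                        } else if (add && offload_in_use) {
                                /* classifier cannot replay its rules */
                                NL_SET_ERR_MSG(extack, "Classifier does not support re-offloading");
                                return -EOPNOTSUPP;
                        }
                }
        }

        return 0;
}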
The immediate use of this patch set is to simplify the life of drivers
which require duplicating rules when sharing blocks. Switch drivers
(mlxsw) can bind ports to rule lists dynamically, while NIC drivers
generally don't have that ability and need the rules to be duplicated
for each ingress they match on. In code terms this means that switch
drivers don't register multiple callbacks for each port, whereas NIC
drivers do, and get a separate request, and hence a separate rule, per
port, as if the block was not shared.
The registration fails today, however, if some rules were already present.
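
For a NIC driver that registers one callback per port, the bind path with
the new extack plumbing might look like the sketch below; the foo_* names
and the ingress-only check are illustrative, only the tcf_block_cb_register()
and tcf_block_cb_unregister() signatures and the extack member of struct
tc_block_offload come from this series:

static int foo_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        /* per-filter offload requests (TC_SETUP_CLS*) arrive here,
         * once per registered callback, i.e. once per port
         */
        return -EOPNOTSUPP;
}

static int foo_setup_tc_block(struct net_device *dev,
                              struct tc_block_offload *f)
{
        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                /* existing rules on a shared block are replayed during
                 * registration; failures are reported via f->extack
                 */
                return tcf_block_cb_register(f->block, foo_setup_tc_block_cb,
                                             dev, dev, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, foo_setup_tc_block_cb, dev);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}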
As John notes in description of patch 7, drivers which register multiple
callbacks to shared blocks will likely need to flush the rules on block
unbind. This set makes the core replay not only the offload add requests
but also the offload remove requests when a callback is unregistered.
v2:
- name parameters in patch 2;
- use unsigned int instead of u32 for in_hw_count;
- improve extack message in patch 7.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
| -rw-r--r-- | include/net/act_api.h | 3 |
| -rw-r--r-- | include/net/pkt_cls.h | 17 |
| -rw-r--r-- | include/net/sch_generic.h | 21 |

3 files changed, 32 insertions, 9 deletions
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e59ebfded62..5ff11adbe2a6 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -190,9 +190,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
 #endif
 }
 
-typedef int tc_setup_cb_t(enum tc_setup_type type,
-                          void *type_data, void *cb_priv);
-
 #ifdef CONFIG_NET_CLS_ACT
 int tc_setup_cb_egdev_register(const struct net_device *dev,
                                tc_setup_cb_t *cb, void *cb_priv);
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3c1a2c47cd4..4070b8eb6d14 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -73,11 +73,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
 unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                              tc_setup_cb_t *cb, void *cb_ident,
-                                             void *cb_priv);
+                                             void *cb_priv,
+                                             struct netlink_ext_ack *extack);
 int tcf_block_cb_register(struct tcf_block *block,
                           tc_setup_cb_t *cb, void *cb_ident,
-                          void *cb_priv);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+                          void *cb_priv, struct netlink_ext_ack *extack);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                               struct tcf_block_cb *block_cb);
 void tcf_block_cb_unregister(struct tcf_block *block,
                              tc_setup_cb_t *cb, void *cb_ident);
 
@@ -161,7 +163,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
 static inline
 struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
                                              tc_setup_cb_t *cb, void *cb_ident,
-                                             void *cb_priv)
+                                             void *cb_priv,
+                                             struct netlink_ext_ack *extack)
 {
         return NULL;
 }
@@ -169,13 +172,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
 static inline
 int tcf_block_cb_register(struct tcf_block *block,
                           tc_setup_cb_t *cb, void *cb_ident,
-                          void *cb_priv)
+                          void *cb_priv, struct netlink_ext_ack *extack)
 {
         return 0;
 }
 
 static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+                               struct tcf_block_cb *block_cb)
 {
 }
 
@@ -596,6 +600,7 @@ struct tc_block_offload {
         enum tc_block_command command;
         enum tcf_block_binder_type binder_type;
         struct tcf_block *block;
+        struct netlink_ext_ack *extack;
 };
 
 struct tc_cls_common_offload {
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6488daa32f82..7432100027b7 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,6 +20,9 @@ struct qdisc_walker;
 struct tcf_walker;
 struct module;
 
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+                          void *type_data, void *cb_priv);
+
 struct qdisc_rate_table {
         struct tc_ratespec rate;
         u32 data[256];
@@ -256,6 +259,9 @@ struct tcf_proto_ops {
                                           bool *last,
                                           struct netlink_ext_ack *);
         void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+        int                     (*reoffload)(struct tcf_proto *tp, bool add,
+                                             tc_setup_cb_t *cb, void *cb_priv,
+                                             struct netlink_ext_ack *extack);
         void                    (*bind_class)(void *, u32, unsigned long);
 
         /* rtnetlink specific */
@@ -330,6 +336,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
         block->offloadcnt--;
 }
 
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+                          u32 *flags, bool add)
+{
+        if (add) {
+                if (!*cnt)
+                        tcf_block_offload_inc(block, flags);
+                (*cnt)++;
+        } else {
+                (*cnt)--;
+                if (!*cnt)
+                        tcf_block_offload_dec(block, flags);
+        }
+}
+
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
 {
         struct qdisc_skb_cb *qcb;
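
For completeness, here is a hedged sketch of what a classifier-side
->reoffload implementation could look like, loosely modelled on matchall;
the cls_mall_head layout and its in_hw_count field are assumptions based on
the cover letter above and are not part of this 'include' diff:

static int mall_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
                          void *cb_priv, struct netlink_ext_ack *extack)
{
        struct cls_mall_head *head = rtnl_dereference(tp->root); /* assumed */
        struct tc_cls_matchall_offload cls_mall = {};
        struct tcf_block *block = tp->chain->block;
        int err;

        if (tc_skip_hw(head->flags))
                return 0;

        /* fill in cls_mall.common (chain index, prio, extack) exactly as
         * for a regular offload request, then replay to this callback only
         */
        cls_mall.command = add ?
                TC_CLSMATCHALL_REPLACE : TC_CLSMATCHALL_DESTROY;
        cls_mall.exts = &head->exts;
        cls_mall.cookie = (unsigned long)head;

        err = cb(TC_SETUP_CLSMATCHALL, &cls_mall, cb_priv);
        if (err) {
                if (add && tc_skip_sw(head->flags))
                        return err;
                return 0;
        }

        /* keep the per-filter counter and the block's offloadcnt/flags in
         * sync via the new helper from sch_generic.h
         */
        tc_cls_offload_cnt_update(block, &head->in_hw_count, &head->flags, add);

        return 0;
}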
