author      Jiri Pirko <jiri@mellanox.com>          2018-01-17 13:46:48 +0300
committer   David S. Miller <davem@davemloft.net>   2018-01-17 22:53:56 +0300
commit      f36fe1c498c8959812415c57b683abaa4527dec5 (patch)
tree        4dc0a63d1438fa524a56dbcaf2c10a4aec772ffa /net/sched
parent      9d3aaff3d8523264ff7082a90759cb8a340200be (diff)
download    linux-f36fe1c498c8959812415c57b683abaa4527dec5.tar.xz
net: sched: introduce block mechanism to handle netif_keep_dst calls
A couple of classifiers call netif_keep_dst directly on q->dev. That is
not possible for a shared block, where multiple qdiscs own the block.
So introduce an infrastructure that keeps track of the block owners in
a list, and use this list to implement a block variant of
netif_keep_dst.
Signed-off-by: Jiri Pirko <jiri@mellanox.com>
Acked-by: Jamal Hadi Salim <jhs@mojatatu.com>
Acked-by: David Ahern <dsahern@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
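
For context, a minimal, illustrative sketch (not part of the patch) of how a
classifier's ->change() handler requests dst retention after this change. The
helper tcf_block_netif_keep_dst() and the tp->chain->block pointer are the ones
added/used by the patch; the stub function name is hypothetical.

#include <net/pkt_cls.h>

/* Hypothetical classifier ->change() fragment. Instead of calling
 * netif_keep_dst(qdisc_dev(tp->q)) on a single qdisc's device, the
 * classifier marks the shared block; the core then calls
 * netif_keep_dst() on every non-clsact qdisc owning the block,
 * including owners that bind to it later.
 */
static int example_cls_change(struct tcf_proto *tp)
{
	tcf_block_netif_keep_dst(tp->chain->block);
	return 0;
}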
Diffstat (limited to 'net/sched')
-rw-r--r--   net/sched/cls_api.c     69
-rw-r--r--   net/sched/cls_bpf.c      4
-rw-r--r--   net/sched/cls_flow.c     2
-rw-r--r--   net/sched/cls_route.c    2
4 files changed, 73 insertions, 4 deletions
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 700595abc641..1ca84230f4de 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -375,6 +375,7 @@ static struct tcf_block *tcf_block_create(struct net *net, struct Qdisc *q,
 	}
 	INIT_LIST_HEAD(&block->chain_list);
 	INIT_LIST_HEAD(&block->cb_list);
+	INIT_LIST_HEAD(&block->owner_list);
 
 	/* Create chain 0 by default, it has to be always present. */
 	chain = tcf_chain_create(block, 0);
@@ -406,6 +407,65 @@ static struct tcf_chain *tcf_block_chain_zero(struct tcf_block *block)
 	return list_first_entry(&block->chain_list, struct tcf_chain, list);
 }
 
+struct tcf_block_owner_item {
+	struct list_head list;
+	struct Qdisc *q;
+	enum tcf_block_binder_type binder_type;
+};
+
+static void
+tcf_block_owner_netif_keep_dst(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	if (block->keep_dst &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS &&
+	    binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS)
+		netif_keep_dst(qdisc_dev(q));
+}
+
+void tcf_block_netif_keep_dst(struct tcf_block *block)
+{
+	struct tcf_block_owner_item *item;
+
+	block->keep_dst = true;
+	list_for_each_entry(item, &block->owner_list, list)
+		tcf_block_owner_netif_keep_dst(block, item->q,
+					       item->binder_type);
+}
+EXPORT_SYMBOL(tcf_block_netif_keep_dst);
+
+static int tcf_block_owner_add(struct tcf_block *block,
+			       struct Qdisc *q,
+			       enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	item = kmalloc(sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+	item->q = q;
+	item->binder_type = binder_type;
+	list_add(&item->list, &block->owner_list);
+	return 0;
+}
+
+static void tcf_block_owner_del(struct tcf_block *block,
+				struct Qdisc *q,
+				enum tcf_block_binder_type binder_type)
+{
+	struct tcf_block_owner_item *item;
+
+	list_for_each_entry(item, &block->owner_list, list) {
+		if (item->q == q && item->binder_type == binder_type) {
+			list_del(&item->list);
+			kfree(item);
+			return;
+		}
+	}
+	WARN_ON(1);
+}
+
 int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 		      struct tcf_block_ext_info *ei,
 		      struct netlink_ext_ack *extack)
@@ -435,6 +495,12 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 		}
 	}
 
+	err = tcf_block_owner_add(block, q, ei->binder_type);
+	if (err)
+		goto err_block_owner_add;
+
+	tcf_block_owner_netif_keep_dst(block, q, ei->binder_type);
+
 	err = tcf_chain_head_change_cb_add(tcf_block_chain_zero(block),
 					   ei, extack);
 	if (err)
@@ -444,6 +510,8 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
 	return 0;
 
 err_chain_head_change_cb_add:
+	tcf_block_owner_del(block, q, ei->binder_type);
+err_block_owner_add:
 	if (created) {
 		if (tcf_block_shared(block))
 			tcf_block_remove(block, net);
@@ -489,6 +557,7 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
 	if (!block)
 		return;
 	tcf_chain_head_change_cb_del(tcf_block_chain_zero(block), ei);
+	tcf_block_owner_del(block, q, ei->binder_type);
 
 	if (--block->refcnt == 0) {
 		if (tcf_block_shared(block))
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 8d78e7f4ecc3..d79cc5086509 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -392,8 +392,8 @@ static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
 	prog->bpf_name = name;
 	prog->filter = fp;
 
-	if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
-		netif_keep_dst(qdisc_dev(tp->q));
+	if (fp->dst_needed)
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 	return 0;
 }
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 25c2a888e1f0..28cd6fb52c16 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -526,7 +526,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
 
 		timer_setup(&fnew->perturb_timer, flow_perturbation,
 			    TIMER_DEFERRABLE);
-		netif_keep_dst(qdisc_dev(tp->q));
+		tcf_block_netif_keep_dst(tp->chain->block);
 
 		if (tb[TCA_FLOW_KEYS]) {
 			fnew->keymask = keymask;
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index ac9a5b8825b9..a1f2b1b7c014 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -527,7 +527,7 @@ static int route4_change(struct net *net, struct sk_buff *in_skb,
 		if (f->handle < f1->handle)
 			break;
 
-	netif_keep_dst(qdisc_dev(tp->q));
+	tcf_block_netif_keep_dst(tp->chain->block);
 	rcu_assign_pointer(f->next, f1);
 	rcu_assign_pointer(*fp, f);
 