author    | Paul Blakey <paulb@nvidia.com>   | 2023-02-18 01:36:15 +0300
committer | Jakub Kicinski <kuba@kernel.org> | 2023-02-21 03:46:10 +0300
commit    | 08a0063df3aed8d76a4034279117db12dbc1050f (patch)
tree      | 669bec635c692e22d0e0c11af84d33098fea9e38 /net/sched
parent    | 80cd22c35c9001fe72bf614d29439de41933deca (diff)
download  | linux-08a0063df3aed8d76a4034279117db12dbc1050f.tar.xz
net/sched: flower: Move filter handle initialization earlier
To support miss to action during hardware offload, the filter's
handle is needed when setting up the actions (tcf_exts_init())
and before offloading.

Move the filter handle initialization earlier.
Signed-off-by: Paul Blakey <paulb@nvidia.com>
Reviewed-by: Jiri Pirko <jiri@nvidia.com>
Reviewed-by: Simon Horman <simon.horman@corigine.com>
Reviewed-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
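
For readers skimming the diff below: the core of the change is an ordering constraint, i.e. the handle must exist before action setup runs. The following is a minimal userspace sketch of that constraint, an illustration only; the names (alloc_handle, setup_actions) are hypothetical stand-ins, not the cls_flower code.

```c
/* Illustration only: a userspace model of the reordering in fl_change().
 * alloc_handle() stands in for idr_alloc_u32() and setup_actions() for
 * tcf_exts_init()/offload setup, which after this patch may read the
 * filter's handle and therefore must run second. Hypothetical names.
 */
#include <stdio.h>
#include <stdlib.h>

struct filter {
	unsigned int handle;
};

static unsigned int next_handle = 1;

/* stand-in for idr_alloc_u32(): assign the next free handle */
static int alloc_handle(struct filter *f)
{
	f->handle = next_handle++;
	return 0;
}

/* stand-in for action setup: it now depends on f->handle being set */
static int setup_actions(const struct filter *f)
{
	if (f->handle == 0)
		return -1; /* the failure mode this ordering avoids */
	printf("actions set up for filter handle %u\n", f->handle);
	return 0;
}

int main(void)
{
	struct filter f = { .handle = 0 };

	/* new order: handle first, then actions */
	if (alloc_handle(&f) || setup_actions(&f))
		return EXIT_FAILURE;
	return EXIT_SUCCESS;
}
```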
Diffstat (limited to 'net/sched')
-rw-r--r-- | net/sched/cls_flower.c | 62
1 file changed, 35 insertions, 27 deletions
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 885c95191ccf..be01d39dd7b9 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2187,10 +2187,6 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	INIT_LIST_HEAD(&fnew->hw_list);
 	refcount_set(&fnew->refcnt, 1);
 
-	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
-	if (err < 0)
-		goto errout;
-
 	if (tb[TCA_FLOWER_FLAGS]) {
 		fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
 
@@ -2200,15 +2196,45 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		}
 	}
 
+	if (!fold) {
+		spin_lock(&tp->lock);
+		if (!handle) {
+			handle = 1;
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    INT_MAX, GFP_ATOMIC);
+		} else {
+			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+					    handle, GFP_ATOMIC);
+
+			/* Filter with specified handle was concurrently
+			 * inserted after initial check in cls_api. This is not
+			 * necessarily an error if NLM_F_EXCL is not set in
+			 * message flags. Returning EAGAIN will cause cls_api to
+			 * try to update concurrently inserted rule.
+			 */
+			if (err == -ENOSPC)
+				err = -EAGAIN;
+		}
+		spin_unlock(&tp->lock);
+
+		if (err)
+			goto errout;
+	}
+	fnew->handle = handle;
+
+	err = tcf_exts_init(&fnew->exts, net, TCA_FLOWER_ACT, 0);
+	if (err < 0)
+		goto errout_idr;
+
 	err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE],
 			   tp->chain->tmplt_priv, flags, fnew->flags,
 			   extack);
 	if (err)
-		goto errout;
+		goto errout_idr;
 
 	err = fl_check_assign_mask(head, fnew, fold, mask);
 	if (err)
-		goto errout;
+		goto errout_idr;
 
 	err = fl_ht_insert_unique(fnew, fold, &in_ht);
 	if (err)
@@ -2274,29 +2300,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		refcount_dec(&fold->refcnt);
 		__fl_put(fold);
 	} else {
-		if (handle) {
-			/* user specifies a handle and it doesn't exist */
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    handle, GFP_ATOMIC);
-
-			/* Filter with specified handle was concurrently
-			 * inserted after initial check in cls_api. This is not
-			 * necessarily an error if NLM_F_EXCL is not set in
-			 * message flags. Returning EAGAIN will cause cls_api to
-			 * try to update concurrently inserted rule.
-			 */
-			if (err == -ENOSPC)
-				err = -EAGAIN;
-		} else {
-			handle = 1;
-			err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
-					    INT_MAX, GFP_ATOMIC);
-		}
-		if (err)
-			goto errout_hw;
+		idr_replace(&head->handle_idr, fnew, fnew->handle);
 		refcount_inc(&fnew->refcnt);
-		fnew->handle = handle;
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
 		spin_unlock(&tp->lock);
 	}
@@ -2319,6 +2325,8 @@ errout_hw:
 				       fnew->mask->filter_ht_params);
 errout_mask:
 	fl_mask_put(head, fnew->mask);
+errout_idr:
+	idr_remove(&head->handle_idr, fnew->handle);
 errout:
 	__fl_put(fnew);
 errout_tb:
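
The patch also extends the function's unwind path with an errout_idr label, so a handle reserved up front is released again when a later step fails. Below is a minimal sketch of that goto-unwind pattern in plain userspace C; the step names are hypothetical stand-ins, assuming nothing beyond the standard library.

```c
/* A minimal sketch of the kernel-style goto-unwind pattern the patch
 * extends with errout_idr: each setup step that fails jumps to a label
 * that undoes everything allocated so far, in reverse order.
 * Hypothetical names, not the cls_flower functions.
 */
#include <stdio.h>
#include <stdlib.h>

static int reserve_handle(void)  { puts("handle reserved"); return 0; }
static void release_handle(void) { puts("handle released"); }
static int init_actions(void)    { puts("actions ready");   return 0; }
static void free_actions(void)   { puts("actions freed"); }
static int apply_parms(void)     { puts("parms rejected");  return -1; }

int main(void)
{
	int err;

	err = reserve_handle();		/* like idr_alloc_u32() */
	if (err)
		goto errout;
	err = init_actions();		/* like tcf_exts_init() */
	if (err)
		goto errout_idr;
	err = apply_parms();		/* like fl_set_parms(); fails here */
	if (err)
		goto errout_actions;	/* deepest unwind first */
	return EXIT_SUCCESS;

errout_actions:
	free_actions();
errout_idr:
	release_handle();		/* like idr_remove() in the patch */
errout:
	return EXIT_FAILURE;
}
```

In the kernel code the same idea appears as the goto errout_idr jumps after tcf_exts_init(), fl_set_parms(), and fl_check_assign_mask() failures, with idr_remove() at errout_idr undoing the earlier idr_alloc_u32().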