path: root/net/sched/act_api.c
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-01-25 23:17:34 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-01-25 23:17:34 +0400
commit     4ba9920e5e9c0e16b5ed24292d45322907bb9035 (patch)
tree       7d023baea59ed0886ded1f0b6d1c6385690b88f7 /net/sched/act_api.c
parent     82c477669a4665eb4e52030792051e0559ee2a36 (diff)
parent     8b662fe70c68282f78482dc272df0c4f355e49f5 (diff)
download   linux-4ba9920e5e9c0e16b5ed24292d45322907bb9035.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) BPF debugger and asm tool by Daniel Borkmann.

 2) Speed up create/bind in AF_PACKET, also from Daniel Borkmann.

 3) Correct reciprocal_divide and update users, from Hannes Frederic Sowa and Daniel Borkmann.

 4) Currently we only have a "set" operation for the hw timestamp socket ioctl, add a "get" operation to match. From Ben Hutchings.

 5) Add better trace events for debugging driver datapath problems, also from Ben Hutchings.

 6) Implement auto corking in TCP, from Eric Dumazet. Basically, if we have a small send and a previous packet is already in the qdisc or device queue, defer until TX completion or we get more data.

 7) Allow userspace to manage ipv6 temporary addresses, from Jiri Pirko.

 8) Add a qdisc bypass option for AF_PACKET sockets, from Daniel Borkmann.

 9) Share IP header compression code between Bluetooth and IEEE802154 layers, from Jukka Rissanen.

10) Fix ipv6 router reachability probing, from Jiri Benc.

11) Allow packets to be captured on macvtap devices, from Vlad Yasevich.

12) Support tunneling in GRO layer, from Jerry Chu.

13) Allow bonding to be configured fully using netlink, from Scott Feldman.

14) Allow AF_PACKET users to obtain the VLAN TPID, just like they can already get the TCI. From Atzm Watanabe.

15) New "Heavy Hitter" qdisc, from Terry Lam.

16) Significantly improve the IPSEC support in pktgen, from Fan Du.

17) Allow ipv4 tunnels to cache routes, just like sockets. From Tom Herbert.

18) Add Proportional Integral Enhanced packet scheduler, from Vijay Subramanian.

19) Allow openvswitch to use mmap'd netlink, from Thomas Graf.

20) Key TCP metrics blobs also by source address, not just destination address. From Christoph Paasch.

21) Support 10G in generic phylib. From Andy Fleming.

22) Try to short-circuit GRO flow compares using device provided RX hash, if provided. From Tom Herbert.

The wireless and netfilter folks have been busy little bees too.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2064 commits)
  net/cxgb4: Fix referencing freed adapter
  ipv6: reallocate addrconf router for ipv6 address when lo device up
  fib_frontend: fix possible NULL pointer dereference
  rtnetlink: remove IFLA_BOND_SLAVE definition
  rtnetlink: remove check for fill_slave_info in rtnl_have_link_slave_info
  qlcnic: update version to 5.3.55
  qlcnic: Enhance logic to calculate msix vectors.
  qlcnic: Refactor interrupt coalescing code for all adapters.
  qlcnic: Update poll controller code path
  qlcnic: Interrupt code cleanup
  qlcnic: Enhance Tx timeout debugging.
  qlcnic: Use bool for rx_mac_learn.
  bonding: fix u64 division
  rtnetlink: add missing IFLA_BOND_AD_INFO_UNSPEC
  sfc: Use the correct maximum TX DMA ring size for SFC9100
  Add Shradha Shah as the sfc driver maintainer.
  net/vxlan: Share RX skb de-marking and checksum checks with ovs
  tulip: cleanup by using ARRAY_SIZE()
  ip_tunnel: clear IPCB in ip_tunnel_xmit() in case dst_link_failure() is called
  net/cxgb4: Don't retrieve stats during recovery
  ...
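[Editor's note] Item 8 above (qdisc bypass for AF_PACKET sockets) is the kind of feature a program enables with a single socket option. A minimal userspace sketch, assuming the PACKET_QDISC_BYPASS option introduced in this merge window and a process with CAP_NET_RAW; error handling is reduced to perror() for brevity:

    /* Open a packet socket and ask for qdisc bypass on transmit. */
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <arpa/inet.h>
    #include <linux/if_packet.h>
    #include <linux/if_ether.h>

    int main(void)
    {
            int one = 1;
            /* Needs CAP_NET_RAW; receives/sends raw frames. */
            int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            /* Skip the qdisc layer on TX; frames go straight to the driver. */
            if (setsockopt(fd, SOL_PACKET, PACKET_QDISC_BYPASS,
                           &one, sizeof(one)) < 0)
                    perror("setsockopt(PACKET_QDISC_BYPASS)");

            close(fd);
            return 0;
    }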
Diffstat (limited to 'net/sched/act_api.c')
-rw-r--r--  net/sched/act_api.c | 435
1 file changed, 173 insertions(+), 262 deletions(-)
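[Editor's note] The bulk of the diff below converts act_api's open-coded hash chains (tcfc_next pointers guarded by an rwlock) to standard hlist buckets under a spinlock. A minimal kernel-style sketch of that pattern, with illustrative names (my_node, my_table) rather than the actual tcf_* symbols:

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_node {
            u32 index;
            struct hlist_node hash;         /* like tcfc_head in tcf_common */
    };

    struct my_table {
            spinlock_t lock;                /* replaces the old rwlock */
            unsigned int hmask;
            struct hlist_head *htab;
    };

    static void my_insert(struct my_table *t, struct my_node *n)
    {
            unsigned int h = n->index & t->hmask;

            spin_lock_bh(&t->lock);
            hlist_add_head(&n->hash, &t->htab[h]);
            spin_unlock_bh(&t->lock);
    }

    static void my_remove(struct my_table *t, struct my_node *n)
    {
            /* hlist_del() needs no bucket walk, unlike the old **p1p scan */
            spin_lock_bh(&t->lock);
            hlist_del(&n->hash);
            spin_unlock_bh(&t->lock);
    }

    static struct my_node *my_lookup(struct my_table *t, u32 index)
    {
            struct my_node *n, *found = NULL;
            unsigned int h = index & t->hmask;

            spin_lock_bh(&t->lock);
            hlist_for_each_entry(n, &t->htab[h], hash) {
                    if (n->index == index) {
                            found = n;
                            break;
                    }
            }
            spin_unlock_bh(&t->lock);
            return found;
    }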
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 69cb848e8345..72bdc7166345 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -29,25 +29,16 @@
void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
- unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
- struct tcf_common **p1p;
-
- for (p1p = &hinfo->htab[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
- if (*p1p == p) {
- write_lock_bh(hinfo->lock);
- *p1p = p->tcfc_next;
- write_unlock_bh(hinfo->lock);
- gen_kill_estimator(&p->tcfc_bstats,
- &p->tcfc_rate_est);
- /*
- * gen_estimator est_timer() might access p->tcfc_lock
- * or bstats, wait a RCU grace period before freeing p
- */
- kfree_rcu(p, tcfc_rcu);
- return;
- }
- }
- WARN_ON(1);
+ spin_lock_bh(&hinfo->lock);
+ hlist_del(&p->tcfc_head);
+ spin_unlock_bh(&hinfo->lock);
+ gen_kill_estimator(&p->tcfc_bstats,
+ &p->tcfc_rate_est);
+ /*
+ * gen_estimator est_timer() might access p->tcfc_lock
+ * or bstats, wait a RCU grace period before freeing p
+ */
+ kfree_rcu(p, tcfc_rcu);
}
EXPORT_SYMBOL(tcf_hash_destroy);
@@ -71,20 +62,22 @@ int tcf_hash_release(struct tcf_common *p, int bind,
EXPORT_SYMBOL(tcf_hash_release);
static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
- struct tc_action *a, struct tcf_hashinfo *hinfo)
+ struct tc_action *a)
{
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+ struct hlist_head *head;
struct tcf_common *p;
int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
struct nlattr *nest;
- read_lock_bh(hinfo->lock);
+ spin_lock_bh(&hinfo->lock);
s_i = cb->args[0];
for (i = 0; i < (hinfo->hmask + 1); i++) {
- p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
- for (; p; p = p->tcfc_next) {
+ hlist_for_each_entry_rcu(p, head, tcfc_head) {
index++;
if (index < s_i)
continue;
@@ -107,7 +100,7 @@ static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
}
}
done:
- read_unlock_bh(hinfo->lock);
+ spin_unlock_bh(&hinfo->lock);
if (n_i)
cb->args[0] += n_i;
return n_i;
@@ -117,10 +110,12 @@ nla_put_failure:
goto done;
}
-static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
- struct tcf_hashinfo *hinfo)
+static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
- struct tcf_common *p, *s_p;
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+ struct hlist_head *head;
+ struct hlist_node *n;
+ struct tcf_common *p;
struct nlattr *nest;
int i = 0, n_i = 0;
@@ -130,14 +125,12 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a,
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
for (i = 0; i < (hinfo->hmask + 1); i++) {
- p = hinfo->htab[tcf_hash(i, hinfo->hmask)];
-
- while (p != NULL) {
- s_p = p->tcfc_next;
- if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo))
+ head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
+ hlist_for_each_entry_safe(p, n, head, tcfc_head) {
+ if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) {
module_put(a->ops->owner);
- n_i++;
- p = s_p;
+ n_i++;
+ }
}
}
if (nla_put_u32(skb, TCA_FCNT, n_i))
@@ -150,48 +143,45 @@ nla_put_failure:
return -EINVAL;
}
-int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
- int type, struct tc_action *a)
+static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
+ int type, struct tc_action *a)
{
- struct tcf_hashinfo *hinfo = a->ops->hinfo;
-
if (type == RTM_DELACTION) {
- return tcf_del_walker(skb, a, hinfo);
+ return tcf_del_walker(skb, a);
} else if (type == RTM_GETACTION) {
- return tcf_dump_walker(skb, cb, a, hinfo);
+ return tcf_dump_walker(skb, cb, a);
} else {
WARN(1, "tcf_generic_walker: unknown action %d\n", type);
return -EINVAL;
}
}
-EXPORT_SYMBOL(tcf_generic_walker);
-struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
+static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
- struct tcf_common *p;
+ struct tcf_common *p = NULL;
+ struct hlist_head *head;
- read_lock_bh(hinfo->lock);
- for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
- p = p->tcfc_next) {
+ spin_lock_bh(&hinfo->lock);
+ head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
+ hlist_for_each_entry_rcu(p, head, tcfc_head)
if (p->tcfc_index == index)
break;
- }
- read_unlock_bh(hinfo->lock);
+ spin_unlock_bh(&hinfo->lock);
return p;
}
-EXPORT_SYMBOL(tcf_hash_lookup);
-u32 tcf_hash_new_index(u32 *idx_gen, struct tcf_hashinfo *hinfo)
+u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo)
{
- u32 val = *idx_gen;
+ u32 val = hinfo->index;
do {
if (++val == 0)
val = 1;
} while (tcf_hash_lookup(val, hinfo));
- return (*idx_gen = val);
+ hinfo->index = val;
+ return val;
}
EXPORT_SYMBOL(tcf_hash_new_index);
@@ -208,9 +198,9 @@ int tcf_hash_search(struct tc_action *a, u32 index)
}
EXPORT_SYMBOL(tcf_hash_search);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
- struct tcf_hashinfo *hinfo)
+struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
{
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = NULL;
if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
if (bind)
@@ -223,9 +213,9 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind,
EXPORT_SYMBOL(tcf_hash_check);
struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size, int bind,
- u32 *idx_gen, struct tcf_hashinfo *hinfo)
+ struct tc_action *a, int size, int bind)
{
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
if (unlikely(!p))
@@ -235,7 +225,8 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
p->tcfc_bindcnt = 1;
spin_lock_init(&p->tcfc_lock);
- p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
+ INIT_HLIST_NODE(&p->tcfc_head);
+ p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
if (est) {
@@ -256,19 +247,18 @@ void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
{
unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
- write_lock_bh(hinfo->lock);
- p->tcfc_next = hinfo->htab[h];
- hinfo->htab[h] = p;
- write_unlock_bh(hinfo->lock);
+ spin_lock_bh(&hinfo->lock);
+ hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
+ spin_unlock_bh(&hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);
-static struct tc_action_ops *act_base = NULL;
+static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
int tcf_register_action(struct tc_action_ops *act)
{
- struct tc_action_ops *a, **ap;
+ struct tc_action_ops *a;
/* Must supply act, dump, cleanup and init */
if (!act->act || !act->dump || !act->cleanup || !act->init)
@@ -281,14 +271,13 @@ int tcf_register_action(struct tc_action_ops *act)
act->walk = tcf_generic_walker;
write_lock(&act_mod_lock);
- for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
return -EEXIST;
}
}
- act->next = NULL;
- *ap = act;
+ list_add_tail(&act->head, &act_base);
write_unlock(&act_mod_lock);
return 0;
}
@@ -296,17 +285,16 @@ EXPORT_SYMBOL(tcf_register_action);
int tcf_unregister_action(struct tc_action_ops *act)
{
- struct tc_action_ops *a, **ap;
+ struct tc_action_ops *a;
int err = -ENOENT;
write_lock(&act_mod_lock);
- for (ap = &act_base; (a = *ap) != NULL; ap = &a->next)
- if (a == act)
+ list_for_each_entry(a, &act_base, head) {
+ if (a == act) {
+ list_del(&act->head);
+ err = 0;
break;
- if (a) {
- *ap = a->next;
- a->next = NULL;
- err = 0;
+ }
}
write_unlock(&act_mod_lock);
return err;
@@ -316,69 +304,42 @@ EXPORT_SYMBOL(tcf_unregister_action);
/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
- struct tc_action_ops *a = NULL;
+ struct tc_action_ops *a, *res = NULL;
if (kind) {
read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (strcmp(kind, a->kind) == 0) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
+ if (try_module_get(a->owner))
+ res = a;
break;
}
}
read_unlock(&act_mod_lock);
}
- return a;
+ return res;
}
/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
- struct tc_action_ops *a = NULL;
+ struct tc_action_ops *a, *res = NULL;
if (kind) {
read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
+ list_for_each_entry(a, &act_base, head) {
if (nla_strcmp(kind, a->kind) == 0) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
+ if (try_module_get(a->owner))
+ res = a;
break;
}
}
read_unlock(&act_mod_lock);
}
- return a;
+ return res;
}
-#if 0
-/* lookup by id */
-static struct tc_action_ops *tc_lookup_action_id(u32 type)
-{
- struct tc_action_ops *a = NULL;
-
- if (type) {
- read_lock(&act_mod_lock);
- for (a = act_base; a; a = a->next) {
- if (a->type == type) {
- if (!try_module_get(a->owner)) {
- read_unlock(&act_mod_lock);
- return NULL;
- }
- break;
- }
- }
- read_unlock(&act_mod_lock);
- }
- return a;
-}
-#endif
-
-int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
+int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res)
{
const struct tc_action *a;
@@ -389,53 +350,39 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
ret = TC_ACT_OK;
goto exec_done;
}
- while ((a = act) != NULL) {
+ list_for_each_entry(a, actions, list) {
repeat:
- if (a->ops) {
- ret = a->ops->act(skb, a, res);
- if (TC_MUNGED & skb->tc_verd) {
- /* copied already, allow trampling */
- skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
- skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
- }
- if (ret == TC_ACT_REPEAT)
- goto repeat; /* we need a ttl - JHS */
- if (ret != TC_ACT_PIPE)
- goto exec_done;
+ ret = a->ops->act(skb, a, res);
+ if (TC_MUNGED & skb->tc_verd) {
+ /* copied already, allow trampling */
+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
+ skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
}
- act = a->next;
+ if (ret == TC_ACT_REPEAT)
+ goto repeat; /* we need a ttl - JHS */
+ if (ret != TC_ACT_PIPE)
+ goto exec_done;
}
exec_done:
return ret;
}
EXPORT_SYMBOL(tcf_action_exec);
-void tcf_action_destroy(struct tc_action *act, int bind)
+void tcf_action_destroy(struct list_head *actions, int bind)
{
- struct tc_action *a;
+ struct tc_action *a, *tmp;
- for (a = act; a; a = act) {
- if (a->ops) {
- if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
- module_put(a->ops->owner);
- act = act->next;
- kfree(a);
- } else {
- /*FIXME: Remove later - catch insertion bugs*/
- WARN(1, "tcf_action_destroy: BUG? destroying NULL ops\n");
- act = act->next;
- kfree(a);
- }
+ list_for_each_entry_safe(a, tmp, actions, list) {
+ if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
+ module_put(a->ops->owner);
+ list_del(&a->list);
+ kfree(a);
}
}
int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
- int err = -EINVAL;
-
- if (a->ops == NULL)
- return err;
return a->ops->dump(skb, a, bind, ref);
}
@@ -446,9 +393,6 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- if (a->ops == NULL)
- return err;
-
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
goto nla_put_failure;
if (tcf_action_copy_stats(skb, a, 0))
@@ -469,14 +413,13 @@ nla_put_failure:
EXPORT_SYMBOL(tcf_action_dump_1);
int
-tcf_action_dump(struct sk_buff *skb, struct tc_action *act, int bind, int ref)
+tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
{
struct tc_action *a;
int err = -EINVAL;
struct nlattr *nest;
- while ((a = act) != NULL) {
- act = a->next;
+ list_for_each_entry(a, actions, list) {
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
goto nla_put_failure;
@@ -551,6 +494,8 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
if (a == NULL)
goto err_mod;
+ a->ops = a_o;
+ INIT_LIST_HEAD(&a->list);
/* backward compatibility for policer */
if (name == NULL)
err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
@@ -565,7 +510,6 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
*/
if (err != ACT_P_CREATED)
module_put(a_o->owner);
- a->ops = a_o;
return a;
@@ -577,37 +521,33 @@ err_out:
return ERR_PTR(err);
}
-struct tc_action *tcf_action_init(struct net *net, struct nlattr *nla,
+int tcf_action_init(struct net *net, struct nlattr *nla,
struct nlattr *est, char *name, int ovr,
- int bind)
+ int bind, struct list_head *actions)
{
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
- struct tc_action *head = NULL, *act, *act_prev = NULL;
+ struct tc_action *act;
int err;
int i;
err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (err < 0)
- return ERR_PTR(err);
+ return err;
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
- if (IS_ERR(act))
+ if (IS_ERR(act)) {
+ err = PTR_ERR(act);
goto err;
+ }
act->order = i;
-
- if (head == NULL)
- head = act;
- else
- act_prev->next = act;
- act_prev = act;
+ list_add_tail(&act->list, actions);
}
- return head;
+ return 0;
err:
- if (head != NULL)
- tcf_action_destroy(head, bind);
- return act;
+ tcf_action_destroy(actions, bind);
+ return err;
}
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
@@ -615,9 +555,9 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
{
int err = 0;
struct gnet_dump d;
- struct tcf_act_hdr *h = a->priv;
+ struct tcf_common *p = a->priv;
- if (h == NULL)
+ if (p == NULL)
goto errout;
/* compat_mode being true specifies a call that is supposed
@@ -626,24 +566,20 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (compat_mode) {
if (a->type == TCA_OLD_COMPAT)
err = gnet_stats_start_copy_compat(skb, 0,
- TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
+ TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
else
return 0;
} else
err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
- &h->tcf_lock, &d);
+ &p->tcfc_lock, &d);
if (err < 0)
goto errout;
- if (a->ops != NULL && a->ops->get_stats != NULL)
- if (a->ops->get_stats(skb, a) < 0)
- goto errout;
-
- if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
- gnet_stats_copy_rate_est(&d, &h->tcf_bstats,
- &h->tcf_rate_est) < 0 ||
- gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
+ if (gnet_stats_copy_basic(&d, &p->tcfc_bstats) < 0 ||
+ gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
+ &p->tcfc_rate_est) < 0 ||
+ gnet_stats_copy_queue(&d, &p->tcfc_qstats) < 0)
goto errout;
if (gnet_stats_finish_copy(&d) < 0)
@@ -656,7 +592,7 @@ errout:
}
static int
-tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
+tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
u16 flags, int event, int bind, int ref)
{
struct tcamsg *t;
@@ -676,7 +612,7 @@ tca_get_fill(struct sk_buff *skb, struct tc_action *a, u32 portid, u32 seq,
if (nest == NULL)
goto out_nlmsg_trim;
- if (tcf_action_dump(skb, a, bind, ref) < 0)
+ if (tcf_action_dump(skb, actions, bind, ref) < 0)
goto out_nlmsg_trim;
nla_nest_end(skb, nest);
@@ -691,14 +627,14 @@ out_nlmsg_trim:
static int
act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
- struct tc_action *a, int event)
+ struct list_head *actions, int event)
{
struct sk_buff *skb;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
- if (tca_get_fill(skb, a, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
+ if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
kfree_skb(skb);
return -EINVAL;
}
@@ -729,9 +665,10 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
if (a == NULL)
goto err_out;
+ INIT_LIST_HEAD(&a->list);
err = -EINVAL;
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
- if (a->ops == NULL)
+ if (a->ops == NULL) /* could happen in batch of actions */
goto err_free;
err = -ENOENT;
if (a->ops->lookup(a, index) == 0)
@@ -748,12 +685,12 @@ err_out:
return ERR_PTR(err);
}
-static void cleanup_a(struct tc_action *act)
+static void cleanup_a(struct list_head *actions)
{
- struct tc_action *a;
+ struct tc_action *a, *tmp;
- for (a = act; a; a = act) {
- act = a->next;
+ list_for_each_entry_safe(a, tmp, actions, list) {
+ list_del(&a->list);
kfree(a);
}
}
@@ -768,6 +705,7 @@ static struct tc_action *create_a(int i)
return NULL;
}
act->order = i;
+ INIT_LIST_HEAD(&act->list);
return act;
}
@@ -806,7 +744,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
err = -EINVAL;
kind = tb[TCA_ACT_KIND];
a->ops = tc_lookup_action(kind);
- if (a->ops == NULL)
+ if (a->ops == NULL) /*some idjot trying to flush unknown action */
goto err_out;
nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
@@ -850,12 +788,40 @@ noflush_out:
}
static int
+tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
+ u32 portid)
+{
+ int ret;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (!skb)
+ return -ENOBUFS;
+
+ if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
+ 0, 1) <= 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ /* now do the delete */
+ tcf_action_destroy(actions, 0);
+
+ ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
+ if (ret > 0)
+ return 0;
+ return ret;
+}
+
+static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 portid, int event)
{
int i, ret;
struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
- struct tc_action *head = NULL, *act, *act_prev = NULL;
+ struct tc_action *act;
+ LIST_HEAD(actions);
ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
if (ret < 0)
@@ -875,117 +841,62 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
-
- if (head == NULL)
- head = act;
- else
- act_prev->next = act;
- act_prev = act;
+ list_add_tail(&act->list, &actions);
}
if (event == RTM_GETACTION)
- ret = act_get_notify(net, portid, n, head, event);
+ ret = act_get_notify(net, portid, n, &actions, event);
else { /* delete */
- struct sk_buff *skb;
-
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb) {
- ret = -ENOBUFS;
- goto err;
- }
-
- if (tca_get_fill(skb, head, portid, n->nlmsg_seq, 0, event,
- 0, 1) <= 0) {
- kfree_skb(skb);
- ret = -EINVAL;
+ ret = tcf_del_notify(net, n, &actions, portid);
+ if (ret)
goto err;
- }
-
- /* now do the delete */
- tcf_action_destroy(head, 0);
- ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
- n->nlmsg_flags & NLM_F_ECHO);
- if (ret > 0)
- return 0;
return ret;
}
err:
- cleanup_a(head);
+ cleanup_a(&actions);
return ret;
}
-static int tcf_add_notify(struct net *net, struct tc_action *a,
- u32 portid, u32 seq, int event, u16 flags)
+static int
+tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
+ u32 portid)
{
- struct tcamsg *t;
- struct nlmsghdr *nlh;
struct sk_buff *skb;
- struct nlattr *nest;
- unsigned char *b;
int err = 0;
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb)
return -ENOBUFS;
- b = skb_tail_pointer(skb);
-
- nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
- if (!nlh)
- goto out_kfree_skb;
- t = nlmsg_data(nlh);
- t->tca_family = AF_UNSPEC;
- t->tca__pad1 = 0;
- t->tca__pad2 = 0;
-
- nest = nla_nest_start(skb, TCA_ACT_TAB);
- if (nest == NULL)
- goto out_kfree_skb;
-
- if (tcf_action_dump(skb, a, 0, 0) < 0)
- goto out_kfree_skb;
-
- nla_nest_end(skb, nest);
-
- nlh->nlmsg_len = skb_tail_pointer(skb) - b;
- NETLINK_CB(skb).dst_group = RTNLGRP_TC;
+ if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
+ RTM_NEWACTION, 0, 0) <= 0) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
- err = rtnetlink_send(skb, net, portid, RTNLGRP_TC, flags & NLM_F_ECHO);
+ err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
+ n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
err = 0;
return err;
-
-out_kfree_skb:
- kfree_skb(skb);
- return -1;
}
-
static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
u32 portid, int ovr)
{
int ret = 0;
- struct tc_action *act;
- struct tc_action *a;
- u32 seq = n->nlmsg_seq;
+ LIST_HEAD(actions);
- act = tcf_action_init(net, nla, NULL, NULL, ovr, 0);
- if (act == NULL)
- goto done;
- if (IS_ERR(act)) {
- ret = PTR_ERR(act);
+ ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
+ if (ret)
goto done;
- }
/* dump then free all the actions after update; inserted policy
* stays intact
*/
- ret = tcf_add_notify(net, act, portid, seq, RTM_NEWACTION, n->nlmsg_flags);
- for (a = act; a; a = act) {
- act = a->next;
- kfree(a);
- }
+ ret = tcf_add_notify(net, n, &actions, portid);
+ cleanup_a(&actions);
done:
return ret;
}
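[Editor's note] The other theme of this patch, as seen from a caller, is that actions now sit on a caller-supplied list_head and are walked with the standard list iterators instead of a private ->next chain. A small illustrative sketch; the demo_* names are hypothetical stand-ins, not the kernel's functions:

    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct demo_action {
            int order;
            struct list_head list;          /* like tc_action.list after this patch */
    };

    static int demo_collect(struct list_head *actions, int count)
    {
            int i;

            for (i = 1; i <= count; i++) {
                    struct demo_action *a = kzalloc(sizeof(*a), GFP_KERNEL);

                    if (!a)
                            return -ENOMEM;
                    a->order = i;
                    INIT_LIST_HEAD(&a->list);
                    /* as tcf_action_init() now does for each parsed action */
                    list_add_tail(&a->list, actions);
            }
            return 0;
    }

    static void demo_destroy(struct list_head *actions)
    {
            struct demo_action *a, *tmp;

            /* _safe variant because entries are unlinked while walking,
             * mirroring the new tcf_action_destroy()/cleanup_a()
             */
            list_for_each_entry_safe(a, tmp, actions, list) {
                    list_del(&a->list);
                    kfree(a);
            }
    }

A caller would declare LIST_HEAD(actions), fill it with demo_collect(&actions, n), and tear it down with demo_destroy(&actions), which is the shape tcf_action_add()/tca_action_gd() take after this change.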