Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/Kconfig | 31
-rw-r--r--  net/sched/Makefile | 1
-rw-r--r--  net/sched/act_api.c | 23
-rw-r--r--  net/sched/act_gact.c | 11
-rw-r--r--  net/sched/act_ipt.c | 12
-rw-r--r--  net/sched/act_mirred.c | 12
-rw-r--r--  net/sched/act_pedit.c | 14
-rw-r--r--  net/sched/act_police.c | 278
-rw-r--r--  net/sched/act_simple.c | 1
-rw-r--r--  net/sched/cls_api.c | 50
-rw-r--r--  net/sched/cls_basic.c | 1
-rw-r--r--  net/sched/cls_fw.c | 19
-rw-r--r--  net/sched/cls_route.c | 20
-rw-r--r--  net/sched/cls_rsvp.c | 17
-rw-r--r--  net/sched/cls_rsvp6.c | 16
-rw-r--r--  net/sched/cls_tcindex.c | 3
-rw-r--r--  net/sched/cls_u32.c | 21
-rw-r--r--  net/sched/em_cmp.c | 1
-rw-r--r--  net/sched/em_meta.c | 2
-rw-r--r--  net/sched/em_nbyte.c | 2
-rw-r--r--  net/sched/em_text.c | 2
-rw-r--r--  net/sched/em_u32.c | 2
-rw-r--r--  net/sched/ematch.c | 15
-rw-r--r--  net/sched/sch_api.c | 91
-rw-r--r--  net/sched/sch_atm.c | 480
-rw-r--r--  net/sched/sch_blackhole.c | 1
-rw-r--r--  net/sched/sch_cbq.c | 90
-rw-r--r--  net/sched/sch_dsmark.c | 35
-rw-r--r--  net/sched/sch_fifo.c | 1
-rw-r--r--  net/sched/sch_generic.c | 222
-rw-r--r--  net/sched/sch_gred.c | 1
-rw-r--r--  net/sched/sch_hfsc.c | 24
-rw-r--r--  net/sched/sch_htb.c | 131
-rw-r--r--  net/sched/sch_ingress.c | 28
-rw-r--r--  net/sched/sch_netem.c | 2
-rw-r--r--  net/sched/sch_prio.c | 145
-rw-r--r--  net/sched/sch_red.c | 1
-rw-r--r--  net/sched/sch_sfq.c | 18
-rw-r--r--  net/sched/sch_tbf.c | 21
-rw-r--r--  net/sched/sch_teql.c | 24
40 files changed, 664 insertions(+), 1205 deletions(-)
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 475df8449be9..d3f7c3f9407a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -111,6 +111,17 @@ config NET_SCH_PRIO
To compile this code as a module, choose M here: the
module will be called sch_prio.
+config NET_SCH_RR
+ tristate "Multi Band Round Robin Queuing (RR)"
+ select NET_SCH_PRIO
+ ---help---
+ Say Y here if you want to use an n-band round robin packet
+ scheduler.
+
+ The module uses sch_prio for its framework and is aliased as
+ sch_rr, so it will load sch_prio, although it is referred
+ to using sch_rr.
+
config NET_SCH_RED
tristate "Random Early Detection (RED)"
---help---
@@ -275,7 +286,6 @@ config CLS_U32_MARK
config NET_CLS_RSVP
tristate "IPv4 Resource Reservation Protocol (RSVP)"
select NET_CLS
- select NET_ESTIMATOR
---help---
The Resource Reservation Protocol (RSVP) permits end systems to
request a minimum and maximum data flow rate for a connection; this
@@ -290,7 +300,6 @@ config NET_CLS_RSVP
config NET_CLS_RSVP6
tristate "IPv6 Resource Reservation Protocol (RSVP6)"
select NET_CLS
- select NET_ESTIMATOR
---help---
The Resource Reservation Protocol (RSVP) permits end systems to
request a minimum and maximum data flow rate for a connection; this
@@ -382,7 +391,6 @@ config NET_EMATCH_TEXT
config NET_CLS_ACT
bool "Actions"
- select NET_ESTIMATOR
---help---
Say Y here if you want to use traffic control actions. Actions
get attached to classifiers and are invoked after a successful
@@ -464,13 +472,12 @@ config NET_ACT_SIMP
config NET_CLS_POLICE
bool "Traffic Policing (obsolete)"
- depends on NET_CLS_ACT!=y
- select NET_ESTIMATOR
+ select NET_CLS_ACT
+ select NET_ACT_POLICE
---help---
Say Y here if you want to do traffic policing, i.e. strict
- bandwidth limiting. This option is obsoleted by the traffic
- policer implemented as action, it stays here for compatibility
- reasons.
+ bandwidth limiting. This option is obsolete and just selects
+ the option replacing it. It will be removed in the future.
config NET_CLS_IND
bool "Incoming device classification"
@@ -480,14 +487,6 @@ config NET_CLS_IND
classification based on the incoming device. This option is
likely to disappear in favour of the metadata ematch.
-config NET_ESTIMATOR
- bool "Rate estimator"
- ---help---
- Say Y here to allow using rate estimators to estimate the current
- rate-of-flow for network devices, queues, etc. This module is
- automatically selected if needed but can be selected manually for
- statistical purposes.
-
endif # NET_SCHED
endmenu
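
The NET_SCH_RR entry above describes an n-band round-robin scheduler built on the sch_prio framework. As a rough, self-contained illustration of the behavioural difference (plain userspace C, not kernel code, with band bookkeeping invented for the example): strict priority always drains the lowest-numbered non-empty band first, while the RR variant resumes scanning after the band it last served.

#include <stdio.h>

#define BANDS 3

/* Return the next non-empty band, scanning round-robin from 'start'. */
static int rr_next(const int qlen[BANDS], int start)
{
	int i;

	for (i = 0; i < BANDS; i++) {
		int band = (start + i) % BANDS;

		if (qlen[band] > 0)
			return band;
	}
	return -1;			/* all bands empty */
}

int main(void)
{
	int qlen[BANDS] = { 2, 1, 2 };	/* packets queued per band */
	int band = 0;

	while ((band = rr_next(qlen, band)) >= 0) {
		printf("dequeue from band %d\n", band);
		qlen[band]--;
		band++;			/* RR: continue after the band just served */
	}
	return 0;
}

With the same backlog, strict priority (sch_prio) would serve bands 0, 0, 1, 2, 2; the round-robin variant serves 0, 1, 2, 0, 2 instead.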
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 020767a204d4..b67c36f65cf2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_NET_SCHED) += sch_api.o sch_blackhole.o
obj-$(CONFIG_NET_CLS) += cls_api.o
obj-$(CONFIG_NET_CLS_ACT) += act_api.o
obj-$(CONFIG_NET_ACT_POLICE) += act_police.o
-obj-$(CONFIG_NET_CLS_POLICE) += act_police.o
obj-$(CONFIG_NET_ACT_GACT) += act_gact.o
obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o
obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 711dd26c95c3..feef366cad5d 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -11,23 +11,13 @@
*
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
-#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>
@@ -42,10 +32,8 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
write_lock_bh(hinfo->lock);
*p1p = p->tcfc_next;
write_unlock_bh(hinfo->lock);
-#ifdef CONFIG_NET_ESTIMATOR
gen_kill_estimator(&p->tcfc_bstats,
&p->tcfc_rate_est);
-#endif
kfree(p);
return;
}
@@ -232,15 +220,12 @@ struct tcf_common *tcf_hash_create(u32 index, struct rtattr *est, struct tc_acti
p->tcfc_bindcnt = 1;
spin_lock_init(&p->tcfc_lock);
- p->tcfc_stats_lock = &p->tcfc_lock;
p->tcfc_index = index ? index : tcf_hash_new_index(idx_gen, hinfo);
p->tcfc_tm.install = jiffies;
p->tcfc_tm.lastuse = jiffies;
-#ifdef CONFIG_NET_ESTIMATOR
if (est)
gen_new_estimator(&p->tcfc_bstats, &p->tcfc_rate_est,
- p->tcfc_stats_lock, est);
-#endif
+ &p->tcfc_lock, est);
a->priv = (void *) p;
return p;
}
@@ -599,12 +584,12 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
if (compat_mode) {
if (a->type == TCA_OLD_COMPAT)
err = gnet_stats_start_copy_compat(skb, 0,
- TCA_STATS, TCA_XSTATS, h->tcf_stats_lock, &d);
+ TCA_STATS, TCA_XSTATS, &h->tcf_lock, &d);
else
return 0;
} else
err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
- h->tcf_stats_lock, &d);
+ &h->tcf_lock, &d);
if (err < 0)
goto errout;
@@ -614,9 +599,7 @@ int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
goto errout;
if (gnet_stats_copy_basic(&d, &h->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(&d, &h->tcf_rate_est) < 0 ||
-#endif
gnet_stats_copy_queue(&d, &h->tcf_qstats) < 0)
goto errout;
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index 7517f3791541..a9631e426d91 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -10,26 +10,15 @@
*
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_gact.h>
#include <net/tc_act/tc_gact.h>
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 00b05f422d45..6b407ece953c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -11,27 +11,15 @@
* Copyright: Jamal Hadi Salim (2002-4)
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
-#include <linux/kmod.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_ipt.h>
#include <net/tc_act/tc_ipt.h>
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index de21c92faaa2..579578944ae7 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -12,31 +12,19 @@
*
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>
-#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#define MIRRED_TAB_MASK 7
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 45b3cda86a21..b46fab5fb323 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -9,26 +9,15 @@
* Authors: Jamal Hadi Salim (2002-4)
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/proc_fs.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <linux/tc_act/tc_pedit.h>
#include <net/tc_act/tc_pedit.h>
@@ -164,8 +153,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
printk("offset must be on 32 bit boundaries\n");
goto bad;
}
- if (skb->len < 0 ||
- (offset > 0 && offset > skb->len)) {
+ if (offset > 0 && offset > skb->len) {
printk("offset %d cant exceed pkt length %d\n",
offset, skb->len);
goto bad;
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 616f465f407e..bf90e60f8411 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -10,25 +10,15 @@
* J Hadi Salim (action changes)
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/module.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
-#include <net/sock.h>
#include <net/act_api.h>
#include <net/netlink.h>
@@ -60,7 +50,6 @@ struct tc_police_compat
/* Each policer is serialized by its individual spinlock */
-#ifdef CONFIG_NET_CLS_ACT
static int tcf_act_police_walker(struct sk_buff *skb, struct netlink_callback *cb,
int type, struct tc_action *a)
{
@@ -106,9 +95,8 @@ rtattr_failure:
nlmsg_trim(skb, r);
goto done;
}
-#endif
-void tcf_police_destroy(struct tcf_police *p)
+static void tcf_police_destroy(struct tcf_police *p)
{
unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
struct tcf_common **p1p;
@@ -118,10 +106,8 @@ void tcf_police_destroy(struct tcf_police *p)
write_lock_bh(&police_lock);
*p1p = p->tcf_next;
write_unlock_bh(&police_lock);
-#ifdef CONFIG_NET_ESTIMATOR
gen_kill_estimator(&p->tcf_bstats,
&p->tcf_rate_est);
-#endif
if (p->tcfp_R_tab)
qdisc_put_rtab(p->tcfp_R_tab);
if (p->tcfp_P_tab)
@@ -133,7 +119,6 @@ void tcf_police_destroy(struct tcf_police *p)
BUG_TRAP(0);
}
-#ifdef CONFIG_NET_CLS_ACT
static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
struct tc_action *a, int ovr, int bind)
{
@@ -185,7 +170,6 @@ static int tcf_act_police_locate(struct rtattr *rta, struct rtattr *est,
ret = ACT_P_CREATED;
police->tcf_refcnt = 1;
spin_lock_init(&police->tcf_lock);
- police->tcf_stats_lock = &police->tcf_lock;
if (bind)
police->tcf_bindcnt = 1;
override:
@@ -227,15 +211,13 @@ override:
police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
police->tcf_action = parm->action;
-#ifdef CONFIG_NET_ESTIMATOR
if (tb[TCA_POLICE_AVRATE-1])
police->tcfp_ewma_rate =
*(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
if (est)
gen_replace_estimator(&police->tcf_bstats,
&police->tcf_rate_est,
- police->tcf_stats_lock, est);
-#endif
+ &police->tcf_lock, est);
spin_unlock_bh(&police->tcf_lock);
if (ret != ACT_P_CREATED)
@@ -262,10 +244,19 @@ failure:
static int tcf_act_police_cleanup(struct tc_action *a, int bind)
{
struct tcf_police *p = a->priv;
+ int ret = 0;
- if (p != NULL)
- return tcf_police_release(p, bind);
- return 0;
+ if (p != NULL) {
+ if (bind)
+ p->tcf_bindcnt--;
+
+ p->tcf_refcnt--;
+ if (p->tcf_refcnt <= 0 && !p->tcf_bindcnt) {
+ tcf_police_destroy(p);
+ ret = 1;
+ }
+ }
+ return ret;
}
static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
@@ -281,14 +272,12 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
police->tcf_bstats.bytes += skb->len;
police->tcf_bstats.packets++;
-#ifdef CONFIG_NET_ESTIMATOR
if (police->tcfp_ewma_rate &&
police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
police->tcf_qstats.overlimits++;
spin_unlock(&police->tcf_lock);
return police->tcf_action;
}
-#endif
if (skb->len <= police->tcfp_mtu) {
if (police->tcfp_R_tab == NULL) {
@@ -348,10 +337,8 @@ tcf_act_police_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
if (police->tcfp_result)
RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
&police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
if (police->tcfp_ewma_rate)
RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
return skb->len;
rtattr_failure:
@@ -391,240 +378,3 @@ police_cleanup_module(void)
module_init(police_init_module);
module_exit(police_cleanup_module);
-
-#else /* CONFIG_NET_CLS_ACT */
-
-static struct tcf_common *tcf_police_lookup(u32 index)
-{
- struct tcf_hashinfo *hinfo = &police_hash_info;
- struct tcf_common *p;
-
- read_lock(hinfo->lock);
- for (p = hinfo->htab[tcf_hash(index, hinfo->hmask)]; p;
- p = p->tcfc_next) {
- if (p->tcfc_index == index)
- break;
- }
- read_unlock(hinfo->lock);
-
- return p;
-}
-
-static u32 tcf_police_new_index(void)
-{
- u32 *idx_gen = &police_idx_gen;
- u32 val = *idx_gen;
-
- do {
- if (++val == 0)
- val = 1;
- } while (tcf_police_lookup(val));
-
- return (*idx_gen = val);
-}
-
-struct tcf_police *tcf_police_locate(struct rtattr *rta, struct rtattr *est)
-{
- unsigned int h;
- struct tcf_police *police;
- struct rtattr *tb[TCA_POLICE_MAX];
- struct tc_police *parm;
- int size;
-
- if (rtattr_parse_nested(tb, TCA_POLICE_MAX, rta) < 0)
- return NULL;
-
- if (tb[TCA_POLICE_TBF-1] == NULL)
- return NULL;
- size = RTA_PAYLOAD(tb[TCA_POLICE_TBF-1]);
- if (size != sizeof(*parm) && size != sizeof(struct tc_police_compat))
- return NULL;
-
- parm = RTA_DATA(tb[TCA_POLICE_TBF-1]);
-
- if (parm->index) {
- struct tcf_common *pc;
-
- pc = tcf_police_lookup(parm->index);
- if (pc) {
- police = to_police(pc);
- police->tcf_refcnt++;
- return police;
- }
- }
- police = kzalloc(sizeof(*police), GFP_KERNEL);
- if (unlikely(!police))
- return NULL;
-
- police->tcf_refcnt = 1;
- spin_lock_init(&police->tcf_lock);
- police->tcf_stats_lock = &police->tcf_lock;
- if (parm->rate.rate) {
- police->tcfp_R_tab =
- qdisc_get_rtab(&parm->rate, tb[TCA_POLICE_RATE-1]);
- if (police->tcfp_R_tab == NULL)
- goto failure;
- if (parm->peakrate.rate) {
- police->tcfp_P_tab =
- qdisc_get_rtab(&parm->peakrate,
- tb[TCA_POLICE_PEAKRATE-1]);
- if (police->tcfp_P_tab == NULL)
- goto failure;
- }
- }
- if (tb[TCA_POLICE_RESULT-1]) {
- if (RTA_PAYLOAD(tb[TCA_POLICE_RESULT-1]) != sizeof(u32))
- goto failure;
- police->tcfp_result = *(u32*)RTA_DATA(tb[TCA_POLICE_RESULT-1]);
- }
-#ifdef CONFIG_NET_ESTIMATOR
- if (tb[TCA_POLICE_AVRATE-1]) {
- if (RTA_PAYLOAD(tb[TCA_POLICE_AVRATE-1]) != sizeof(u32))
- goto failure;
- police->tcfp_ewma_rate =
- *(u32*)RTA_DATA(tb[TCA_POLICE_AVRATE-1]);
- }
-#endif
- police->tcfp_toks = police->tcfp_burst = parm->burst;
- police->tcfp_mtu = parm->mtu;
- if (police->tcfp_mtu == 0) {
- police->tcfp_mtu = ~0;
- if (police->tcfp_R_tab)
- police->tcfp_mtu = 255<<police->tcfp_R_tab->rate.cell_log;
- }
- if (police->tcfp_P_tab)
- police->tcfp_ptoks = L2T_P(police, police->tcfp_mtu);
- police->tcfp_t_c = psched_get_time();
- police->tcf_index = parm->index ? parm->index :
- tcf_police_new_index();
- police->tcf_action = parm->action;
-#ifdef CONFIG_NET_ESTIMATOR
- if (est)
- gen_new_estimator(&police->tcf_bstats, &police->tcf_rate_est,
- police->tcf_stats_lock, est);
-#endif
- h = tcf_hash(police->tcf_index, POL_TAB_MASK);
- write_lock_bh(&police_lock);
- police->tcf_next = tcf_police_ht[h];
- tcf_police_ht[h] = &police->common;
- write_unlock_bh(&police_lock);
- return police;
-
-failure:
- if (police->tcfp_R_tab)
- qdisc_put_rtab(police->tcfp_R_tab);
- kfree(police);
- return NULL;
-}
-
-int tcf_police(struct sk_buff *skb, struct tcf_police *police)
-{
- psched_time_t now;
- long toks;
- long ptoks = 0;
-
- spin_lock(&police->tcf_lock);
-
- police->tcf_bstats.bytes += skb->len;
- police->tcf_bstats.packets++;
-
-#ifdef CONFIG_NET_ESTIMATOR
- if (police->tcfp_ewma_rate &&
- police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
- police->tcf_qstats.overlimits++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
- }
-#endif
- if (skb->len <= police->tcfp_mtu) {
- if (police->tcfp_R_tab == NULL) {
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
- }
-
- now = psched_get_time();
- toks = psched_tdiff_bounded(now, police->tcfp_t_c,
- police->tcfp_burst);
- if (police->tcfp_P_tab) {
- ptoks = toks + police->tcfp_ptoks;
- if (ptoks > (long)L2T_P(police, police->tcfp_mtu))
- ptoks = (long)L2T_P(police, police->tcfp_mtu);
- ptoks -= L2T_P(police, skb->len);
- }
- toks += police->tcfp_toks;
- if (toks > (long)police->tcfp_burst)
- toks = police->tcfp_burst;
- toks -= L2T(police, skb->len);
- if ((toks|ptoks) >= 0) {
- police->tcfp_t_c = now;
- police->tcfp_toks = toks;
- police->tcfp_ptoks = ptoks;
- spin_unlock(&police->tcf_lock);
- return police->tcfp_result;
- }
- }
-
- police->tcf_qstats.overlimits++;
- spin_unlock(&police->tcf_lock);
- return police->tcf_action;
-}
-EXPORT_SYMBOL(tcf_police);
-
-int tcf_police_dump(struct sk_buff *skb, struct tcf_police *police)
-{
- unsigned char *b = skb_tail_pointer(skb);
- struct tc_police opt;
-
- opt.index = police->tcf_index;
- opt.action = police->tcf_action;
- opt.mtu = police->tcfp_mtu;
- opt.burst = police->tcfp_burst;
- if (police->tcfp_R_tab)
- opt.rate = police->tcfp_R_tab->rate;
- else
- memset(&opt.rate, 0, sizeof(opt.rate));
- if (police->tcfp_P_tab)
- opt.peakrate = police->tcfp_P_tab->rate;
- else
- memset(&opt.peakrate, 0, sizeof(opt.peakrate));
- RTA_PUT(skb, TCA_POLICE_TBF, sizeof(opt), &opt);
- if (police->tcfp_result)
- RTA_PUT(skb, TCA_POLICE_RESULT, sizeof(int),
- &police->tcfp_result);
-#ifdef CONFIG_NET_ESTIMATOR
- if (police->tcfp_ewma_rate)
- RTA_PUT(skb, TCA_POLICE_AVRATE, 4, &police->tcfp_ewma_rate);
-#endif
- return skb->len;
-
-rtattr_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-
-int tcf_police_dump_stats(struct sk_buff *skb, struct tcf_police *police)
-{
- struct gnet_dump d;
-
- if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
- TCA_XSTATS, police->tcf_stats_lock,
- &d) < 0)
- goto errout;
-
- if (gnet_stats_copy_basic(&d, &police->tcf_bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
- gnet_stats_copy_rate_est(&d, &police->tcf_rate_est) < 0 ||
-#endif
- gnet_stats_copy_queue(&d, &police->tcf_qstats) < 0)
- goto errout;
-
- if (gnet_stats_finish_copy(&d) < 0)
- goto errout;
-
- return 0;
-
-errout:
- return -1;
-}
-
-#endif /* CONFIG_NET_CLS_ACT */
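
For reference, the conforming/exceeding decision that tcf_act_police() (and the removed standalone tcf_police() above) makes is a dual token bucket: 'toks' tracks the committed rate and 'ptoks' the peak rate. A condensed userspace sketch of that test follows; time handling is reduced to plain integer deltas and the kernel's rate-table lookups L2T()/L2T_P() are stubbed out as parameters, so this is an illustration only, not the kernel code.

#include <stdbool.h>
#include <stdint.h>

struct police_state {
	int64_t burst;		/* committed bucket depth, in time units */
	int64_t toks;		/* committed-rate tokens */
	int64_t ptoks;		/* peak-rate tokens */
	int64_t mtu_ptoks;	/* peak tokens needed for one MTU (L2T_P(mtu)) */
	uint64_t t_c;		/* time of last update */
	bool has_peak;		/* peak rate configured? */
};

/*
 * l2t / l2t_p stand in for the kernel's rate-table lookups for this
 * packet's length.  Returns true when the packet conforms (tcfp_result
 * path) and false when it exceeds the configured rate (overlimit path).
 */
static bool police_conforms(struct police_state *p, uint64_t now,
			    int64_t l2t, int64_t l2t_p)
{
	int64_t toks = (int64_t)(now - p->t_c);
	int64_t ptoks = 0;

	if (toks > p->burst)		/* psched_tdiff_bounded() */
		toks = p->burst;
	if (p->has_peak) {
		ptoks = toks + p->ptoks;
		if (ptoks > p->mtu_ptoks)
			ptoks = p->mtu_ptoks;
		ptoks -= l2t_p;
	}
	toks += p->toks;
	if (toks > p->burst)
		toks = p->burst;
	toks -= l2t;

	if ((toks | ptoks) >= 0) {	/* both buckets still non-negative */
		p->t_c = now;
		p->toks = toks;
		p->ptoks = ptoks;
		return true;
	}
	return false;
}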
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 36e1edad5990..fb84ef33d14f 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <net/netlink.h>
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index ebf94edf0478..5f0fbca7393f 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -14,26 +14,16 @@
*
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/netlink.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
@@ -468,11 +458,6 @@ tcf_exts_destroy(struct tcf_proto *tp, struct tcf_exts *exts)
tcf_action_destroy(exts->action, TCA_ACT_UNBIND);
exts->action = NULL;
}
-#elif defined CONFIG_NET_CLS_POLICE
- if (exts->police) {
- tcf_police_release(exts->police, TCA_ACT_UNBIND);
- exts->police = NULL;
- }
#endif
}
@@ -506,17 +491,6 @@ tcf_exts_validate(struct tcf_proto *tp, struct rtattr **tb,
exts->action = act;
}
}
-#elif defined CONFIG_NET_CLS_POLICE
- if (map->police && tb[map->police-1]) {
- struct tcf_police *p;
-
- p = tcf_police_locate(tb[map->police-1], rate_tlv);
- if (p == NULL)
- return -EINVAL;
-
- exts->police = p;
- } else if (map->action && tb[map->action-1])
- return -EOPNOTSUPP;
#else
if ((map->action && tb[map->action-1]) ||
(map->police && tb[map->police-1]))
@@ -539,15 +513,6 @@ tcf_exts_change(struct tcf_proto *tp, struct tcf_exts *dst,
if (act)
tcf_action_destroy(act, TCA_ACT_UNBIND);
}
-#elif defined CONFIG_NET_CLS_POLICE
- if (src->police) {
- struct tcf_police *p;
- tcf_tree_lock(tp);
- p = xchg(&dst->police, src->police);
- tcf_tree_unlock(tp);
- if (p)
- tcf_police_release(p, TCA_ACT_UNBIND);
- }
#endif
}
@@ -576,17 +541,6 @@ tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts,
p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
}
}
-#elif defined CONFIG_NET_CLS_POLICE
- if (map->police && exts->police) {
- struct rtattr *p_rta = (struct rtattr *)skb_tail_pointer(skb);
-
- RTA_PUT(skb, map->police, 0, NULL);
-
- if (tcf_police_dump(skb, exts->police) < 0)
- goto rtattr_failure;
-
- p_rta->rta_len = skb_tail_pointer(skb) - (u8 *)p_rta;
- }
#endif
return 0;
rtattr_failure: __attribute__ ((unused))
@@ -601,10 +555,6 @@ tcf_exts_dump_stats(struct sk_buff *skb, struct tcf_exts *exts,
if (exts->action)
if (tcf_action_copy_stats(skb, exts->action, 1) < 0)
goto rtattr_failure;
-#elif defined CONFIG_NET_CLS_POLICE
- if (exts->police)
- if (tcf_police_dump_stats(skb, exts->police) < 0)
- goto rtattr_failure;
#endif
return 0;
rtattr_failure: __attribute__ ((unused))
diff --git a/net/sched/cls_basic.c b/net/sched/cls_basic.c
index c885412d79d5..8dbcf2771a46 100644
--- a/net/sched/cls_basic.c
+++ b/net/sched/cls_basic.c
@@ -13,7 +13,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c
index bbec4a0d4dcb..8adbd6a37d14 100644
--- a/net/sched/cls_fw.c
+++ b/net/sched/cls_fw.c
@@ -19,29 +19,12 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <linux/netfilter.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c
index cc941d0ee3a5..0a8409c1d28a 100644
--- a/net/sched/cls_route.c
+++ b/net/sched/cls_route.c
@@ -10,28 +10,14 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/dst.h>
+#include <net/route.h>
+#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
diff --git a/net/sched/cls_rsvp.c b/net/sched/cls_rsvp.c
index 0a683c07c648..cbb5e0d600f3 100644
--- a/net/sched/cls_rsvp.c
+++ b/net/sched/cls_rsvp.c
@@ -10,27 +10,12 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/ip.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
diff --git a/net/sched/cls_rsvp6.c b/net/sched/cls_rsvp6.c
index 93b6abed57db..dd08aea2aee5 100644
--- a/net/sched/cls_rsvp6.c
+++ b/net/sched/cls_rsvp6.c
@@ -10,28 +10,12 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
#include <linux/ipv6.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
#include <net/netlink.h>
diff --git a/net/sched/cls_tcindex.c b/net/sched/cls_tcindex.c
index 47ac0c556429..2314820a080a 100644
--- a/net/sched/cls_tcindex.c
+++ b/net/sched/cls_tcindex.c
@@ -9,12 +9,9 @@
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/errno.h>
-#include <linux/netdevice.h>
-#include <net/ip.h>
#include <net/act_api.h>
#include <net/netlink.h>
#include <net/pkt_cls.h>
-#include <net/route.h>
/*
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index c7a347bd6d70..8dbe36912ecb 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -30,30 +30,14 @@
* nfmark match added by Catalin(ux aka Dino) BOIE <catab at umbrella.ro>
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
#include <linux/rtnetlink.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
@@ -798,9 +782,6 @@ static int __init init_u32(void)
#ifdef CONFIG_CLS_U32_PERF
printk(" Performance counters on\n");
#endif
-#ifdef CONFIG_NET_CLS_POLICE
- printk(" OLD policer on \n");
-#endif
#ifdef CONFIG_NET_CLS_IND
printk(" input device check on \n");
#endif
diff --git a/net/sched/em_cmp.c b/net/sched/em_cmp.c
index 8d6dacd81900..cc49c932641d 100644
--- a/net/sched/em_cmp.c
+++ b/net/sched/em_cmp.c
@@ -98,3 +98,4 @@ MODULE_LICENSE("GPL");
module_init(init_em_cmp);
module_exit(exit_em_cmp);
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_CMP);
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 60acf8cdb27b..650f09c8bd6a 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -848,3 +848,5 @@ MODULE_LICENSE("GPL");
module_init(init_em_meta);
module_exit(exit_em_meta);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_META);
diff --git a/net/sched/em_nbyte.c b/net/sched/em_nbyte.c
index b4b36efce292..370a1b2ea317 100644
--- a/net/sched/em_nbyte.c
+++ b/net/sched/em_nbyte.c
@@ -76,3 +76,5 @@ MODULE_LICENSE("GPL");
module_init(init_em_nbyte);
module_exit(exit_em_nbyte);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_NBYTE);
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index e8f46169449d..d5cd86efb7d0 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -150,3 +150,5 @@ MODULE_LICENSE("GPL");
module_init(init_em_text);
module_exit(exit_em_text);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_TEXT);
diff --git a/net/sched/em_u32.c b/net/sched/em_u32.c
index 0a2a7fe08de3..112796e4a7c4 100644
--- a/net/sched/em_u32.c
+++ b/net/sched/em_u32.c
@@ -60,3 +60,5 @@ MODULE_LICENSE("GPL");
module_init(init_em_u32);
module_exit(exit_em_u32);
+
+MODULE_ALIAS_TCF_EMATCH(TCF_EM_U32);
diff --git a/net/sched/ematch.c b/net/sched/ematch.c
index 63146d339d81..f3a104e323bd 100644
--- a/net/sched/ematch.c
+++ b/net/sched/ematch.c
@@ -84,9 +84,7 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/mm.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
@@ -224,6 +222,19 @@ static int tcf_em_validate(struct tcf_proto *tp,
if (em->ops == NULL) {
err = -ENOENT;
+#ifdef CONFIG_KMOD
+ __rtnl_unlock();
+ request_module("ematch-kind-%u", em_hdr->kind);
+ rtnl_lock();
+ em->ops = tcf_em_lookup(em_hdr->kind);
+ if (em->ops) {
+ /* We dropped the RTNL mutex in order to
+ * perform the module load. Tell the caller
+ * to replay the request. */
+ module_put(em->ops->owner);
+ err = -EAGAIN;
+ }
+#endif
goto errout;
}
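
The autoload added to tcf_em_validate() above pairs request_module("ematch-kind-%u", em_hdr->kind) with the MODULE_ALIAS_TCF_EMATCH() lines now present in each em_* module. Assuming the macro introduced alongside this change follows the usual kernel alias pattern, it expands to roughly:

#include <linux/module.h>
#include <linux/stringify.h>

/* Assumed expansion: turn the numeric ematch kind into the alias string
 * that request_module("ematch-kind-%u", kind) asks modprobe to resolve. */
#define MODULE_ALIAS_TCF_EMATCH(kind) \
	MODULE_ALIAS("ematch-kind-" __stringify(kind))

so each em_* module becomes loadable under the alias matching its numeric kind, and the -EAGAIN return tells the caller to replay the request once the module is in place.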
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index bec600af03ca..13c09bc32aa3 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -19,30 +19,18 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kmod.h>
#include <linux/list.h>
-#include <linux/bitops.h>
#include <linux/hrtimer.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
-#include <asm/processor.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-
static int qdisc_notify(struct sk_buff *oskb, struct nlmsghdr *n, u32 clid,
struct Qdisc *old, struct Qdisc *new);
static int tclass_notify(struct sk_buff *oskb, struct nlmsghdr *n,
@@ -290,11 +278,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
wd->qdisc->flags &= ~TCQ_F_THROTTLED;
smp_wmb();
- if (spin_trylock(&dev->queue_lock)) {
- qdisc_run(dev);
- spin_unlock(&dev->queue_lock);
- } else
- netif_schedule(dev);
+ netif_schedule(dev);
return HRTIMER_NORESTART;
}
@@ -515,7 +499,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
sch->handle = handle;
if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS-1])) == 0) {
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1]) {
err = gen_new_estimator(&sch->bstats, &sch->rate_est,
sch->stats_lock,
@@ -531,7 +514,6 @@ qdisc_create(struct net_device *dev, u32 handle, struct rtattr **tca, int *errp)
goto err_out3;
}
}
-#endif
qdisc_lock_tree(dev);
list_add_tail(&sch->list, &dev->qdisc_list);
qdisc_unlock_tree(dev);
@@ -559,11 +541,9 @@ static int qdisc_change(struct Qdisc *sch, struct rtattr **tca)
if (err)
return err;
}
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
gen_replace_estimator(&sch->bstats, &sch->rate_est,
sch->stats_lock, tca[TCA_RATE-1]);
-#endif
return 0;
}
@@ -839,9 +819,7 @@ static int tc_fill_qdisc(struct sk_buff *skb, struct Qdisc *q, u32 clid,
goto rtattr_failure;
if (gnet_stats_copy_basic(&d, &q->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(&d, &q->rate_est) < 0 ||
-#endif
gnet_stats_copy_queue(&d, &q->qstats) < 0)
goto rtattr_failure;
@@ -1167,47 +1145,57 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
to this qdisc, (optionally) tests for protocol and asks
specific classifiers.
*/
+int tc_classify_compat(struct sk_buff *skb, struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ __be16 protocol = skb->protocol;
+ int err = 0;
+
+ for (; tp; tp = tp->next) {
+ if ((tp->protocol == protocol ||
+ tp->protocol == htons(ETH_P_ALL)) &&
+ (err = tp->classify(skb, tp, res)) >= 0) {
+#ifdef CONFIG_NET_CLS_ACT
+ if (err != TC_ACT_RECLASSIFY && skb->tc_verd)
+ skb->tc_verd = SET_TC_VERD(skb->tc_verd, 0);
+#endif
+ return err;
+ }
+ }
+ return -1;
+}
+EXPORT_SYMBOL(tc_classify_compat);
+
int tc_classify(struct sk_buff *skb, struct tcf_proto *tp,
- struct tcf_result *res)
+ struct tcf_result *res)
{
int err = 0;
- __be16 protocol = skb->protocol;
+ __be16 protocol;
#ifdef CONFIG_NET_CLS_ACT
struct tcf_proto *otp = tp;
reclassify:
#endif
protocol = skb->protocol;
- for ( ; tp; tp = tp->next) {
- if ((tp->protocol == protocol ||
- tp->protocol == htons(ETH_P_ALL)) &&
- (err = tp->classify(skb, tp, res)) >= 0) {
+ err = tc_classify_compat(skb, tp, res);
#ifdef CONFIG_NET_CLS_ACT
- if ( TC_ACT_RECLASSIFY == err) {
- __u32 verd = (__u32) G_TC_VERD(skb->tc_verd);
- tp = otp;
-
- if (MAX_REC_LOOP < verd++) {
- printk("rule prio %d protocol %02x reclassify is buggy packet dropped\n",
- tp->prio&0xffff, ntohs(tp->protocol));
- return TC_ACT_SHOT;
- }
- skb->tc_verd = SET_TC_VERD(skb->tc_verd,verd);
- goto reclassify;
- } else {
- if (skb->tc_verd)
- skb->tc_verd = SET_TC_VERD(skb->tc_verd,0);
- return err;
- }
-#else
-
- return err;
-#endif
+ if (err == TC_ACT_RECLASSIFY) {
+ u32 verd = G_TC_VERD(skb->tc_verd);
+ tp = otp;
+
+ if (verd++ >= MAX_REC_LOOP) {
+ printk("rule prio %u protocol %02x reclassify loop, "
+ "packet dropped\n",
+ tp->prio&0xffff, ntohs(tp->protocol));
+ return TC_ACT_SHOT;
}
-
+ skb->tc_verd = SET_TC_VERD(skb->tc_verd, verd);
+ goto reclassify;
}
- return -1;
+#endif
+ return err;
}
+EXPORT_SYMBOL(tc_classify);
void tcf_destroy(struct tcf_proto *tp)
{
@@ -1274,4 +1262,3 @@ EXPORT_SYMBOL(qdisc_get_rtab);
EXPORT_SYMBOL(qdisc_put_rtab);
EXPORT_SYMBOL(register_qdisc);
EXPORT_SYMBOL(unregister_qdisc);
-EXPORT_SYMBOL(tc_classify);
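
With tc_classify() now a wrapper that retains the reclassify loop, qdiscs that handle action verdicts themselves can call tc_classify_compat() directly, as the sch_atm conversion below does. A minimal sketch of the caller pattern (the TC_ACT_* handling mirrors the converted qdiscs in this patch; the function and its surrounding qdisc are hypothetical):

static int example_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			   struct tcf_proto *filter_list)
{
	struct tcf_result res;
	int result = tc_classify_compat(skb, filter_list, &res);

#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		kfree_skb(skb);
		return NET_XMIT_SUCCESS;	/* skb now owned by an action */
	case TC_ACT_SHOT:
		kfree_skb(skb);
		sch->qstats.drops++;
		return NET_XMIT_DROP;
	}
#endif
	if (result < 0)
		return NET_XMIT_BYPASS;		/* no filter matched */

	/* result >= 0: res.class / res.classid identify the target class;
	 * hand the skb to that class's child qdisc here. */
	return NET_XMIT_SUCCESS;
}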
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index be7d299acd73..417ec8fb7f1a 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -2,24 +2,19 @@
/* Written 1998-2000 by Werner Almesberger, EPFL ICA */
-
#include <linux/module.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
-#include <linux/interrupt.h>
#include <linux/atmdev.h>
#include <linux/atmclip.h>
-#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
-#include <linux/file.h> /* for fput */
+#include <linux/file.h> /* for fput */
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#include <net/sock.h>
-
-extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
+extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
#if 0 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
@@ -33,7 +28,6 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
#define D2PRINTK(format,args...)
#endif
-
/*
* The ATM queuing discipline provides a framework for invoking classifiers
* (aka "filters"), which in turn select classes of this queuing discipline.
@@ -55,23 +49,21 @@ extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
* - should lock the flow while there is data in the queue (?)
*/
-
#define PRIV(sch) qdisc_priv(sch)
#define VCC2FLOW(vcc) ((struct atm_flow_data *) ((vcc)->user_back))
-
struct atm_flow_data {
- struct Qdisc *q; /* FIFO, TBF, etc. */
+ struct Qdisc *q; /* FIFO, TBF, etc. */
struct tcf_proto *filter_list;
- struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
- void (*old_pop)(struct atm_vcc *vcc,struct sk_buff *skb); /* chaining */
+ struct atm_vcc *vcc; /* VCC; NULL if VCC is closed */
+ void (*old_pop)(struct atm_vcc *vcc,
+ struct sk_buff * skb); /* chaining */
struct atm_qdisc_data *parent; /* parent qdisc */
struct socket *sock; /* for closing */
u32 classid; /* x:y type ID */
int ref; /* reference count */
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
- spinlock_t *stats_lock;
struct atm_flow_data *next;
struct atm_flow_data *excess; /* flow for excess traffic;
NULL to set CLP instead */
@@ -86,76 +78,74 @@ struct atm_qdisc_data {
struct tasklet_struct task; /* requeue tasklet */
};
-
/* ------------------------- Class/flow operations ------------------------- */
-
-static int find_flow(struct atm_qdisc_data *qdisc,struct atm_flow_data *flow)
+static int find_flow(struct atm_qdisc_data *qdisc, struct atm_flow_data *flow)
{
struct atm_flow_data *walk;
- DPRINTK("find_flow(qdisc %p,flow %p)\n",qdisc,flow);
+ DPRINTK("find_flow(qdisc %p,flow %p)\n", qdisc, flow);
for (walk = qdisc->flows; walk; walk = walk->next)
- if (walk == flow) return 1;
+ if (walk == flow)
+ return 1;
DPRINTK("find_flow: not found\n");
return 0;
}
-
-static __inline__ struct atm_flow_data *lookup_flow(struct Qdisc *sch,
- u32 classid)
+static inline struct atm_flow_data *lookup_flow(struct Qdisc *sch, u32 classid)
{
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
for (flow = p->flows; flow; flow = flow->next)
- if (flow->classid == classid) break;
+ if (flow->classid == classid)
+ break;
return flow;
}
-
-static int atm_tc_graft(struct Qdisc *sch,unsigned long arg,
- struct Qdisc *new,struct Qdisc **old)
+static int atm_tc_graft(struct Qdisc *sch, unsigned long arg,
+ struct Qdisc *new, struct Qdisc **old)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) arg;
-
- DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",sch,
- p,flow,new,old);
- if (!find_flow(p,flow)) return -EINVAL;
- if (!new) new = &noop_qdisc;
- *old = xchg(&flow->q,new);
- if (*old) qdisc_reset(*old);
+ struct atm_flow_data *flow = (struct atm_flow_data *)arg;
+
+ DPRINTK("atm_tc_graft(sch %p,[qdisc %p],flow %p,new %p,old %p)\n",
+ sch, p, flow, new, old);
+ if (!find_flow(p, flow))
+ return -EINVAL;
+ if (!new)
+ new = &noop_qdisc;
+ *old = xchg(&flow->q, new);
+ if (*old)
+ qdisc_reset(*old);
return 0;
}
-
-static struct Qdisc *atm_tc_leaf(struct Qdisc *sch,unsigned long cl)
+static struct Qdisc *atm_tc_leaf(struct Qdisc *sch, unsigned long cl)
{
- struct atm_flow_data *flow = (struct atm_flow_data *) cl;
+ struct atm_flow_data *flow = (struct atm_flow_data *)cl;
- DPRINTK("atm_tc_leaf(sch %p,flow %p)\n",sch,flow);
+ DPRINTK("atm_tc_leaf(sch %p,flow %p)\n", sch, flow);
return flow ? flow->q : NULL;
}
-
-static unsigned long atm_tc_get(struct Qdisc *sch,u32 classid)
+static unsigned long atm_tc_get(struct Qdisc *sch, u32 classid)
{
- struct atm_qdisc_data *p __attribute__((unused)) = PRIV(sch);
+ struct atm_qdisc_data *p __maybe_unused = PRIV(sch);
struct atm_flow_data *flow;
- DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
- flow = lookup_flow(sch,classid);
- if (flow) flow->ref++;
- DPRINTK("atm_tc_get: flow %p\n",flow);
- return (unsigned long) flow;
+ DPRINTK("atm_tc_get(sch %p,[qdisc %p],classid %x)\n", sch, p, classid);
+ flow = lookup_flow(sch, classid);
+ if (flow)
+ flow->ref++;
+ DPRINTK("atm_tc_get: flow %p\n", flow);
+ return (unsigned long)flow;
}
-
static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
- unsigned long parent, u32 classid)
+ unsigned long parent, u32 classid)
{
- return atm_tc_get(sch,classid);
+ return atm_tc_get(sch, classid);
}
/*
@@ -163,72 +153,75 @@ static unsigned long atm_tc_bind_filter(struct Qdisc *sch,
* requested (atm_tc_destroy, etc.). The assumption here is that we never drop
* anything that still seems to be in use.
*/
-
static void atm_tc_put(struct Qdisc *sch, unsigned long cl)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) cl;
+ struct atm_flow_data *flow = (struct atm_flow_data *)cl;
struct atm_flow_data **prev;
- DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n",sch,p,flow);
- if (--flow->ref) return;
+ DPRINTK("atm_tc_put(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
+ if (--flow->ref)
+ return;
DPRINTK("atm_tc_put: destroying\n");
for (prev = &p->flows; *prev; prev = &(*prev)->next)
- if (*prev == flow) break;
+ if (*prev == flow)
+ break;
if (!*prev) {
- printk(KERN_CRIT "atm_tc_put: class %p not found\n",flow);
+ printk(KERN_CRIT "atm_tc_put: class %p not found\n", flow);
return;
}
*prev = flow->next;
- DPRINTK("atm_tc_put: qdisc %p\n",flow->q);
+ DPRINTK("atm_tc_put: qdisc %p\n", flow->q);
qdisc_destroy(flow->q);
tcf_destroy_chain(flow->filter_list);
if (flow->sock) {
DPRINTK("atm_tc_put: f_count %d\n",
- file_count(flow->sock->file));
+ file_count(flow->sock->file));
flow->vcc->pop = flow->old_pop;
sockfd_put(flow->sock);
}
- if (flow->excess) atm_tc_put(sch,(unsigned long) flow->excess);
- if (flow != &p->link) kfree(flow);
+ if (flow->excess)
+ atm_tc_put(sch, (unsigned long)flow->excess);
+ if (flow != &p->link)
+ kfree(flow);
/*
* If flow == &p->link, the qdisc no longer works at this point and
* needs to be removed. (By the caller of atm_tc_put.)
*/
}
-
-static void sch_atm_pop(struct atm_vcc *vcc,struct sk_buff *skb)
+static void sch_atm_pop(struct atm_vcc *vcc, struct sk_buff *skb)
{
struct atm_qdisc_data *p = VCC2FLOW(vcc)->parent;
- D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n",vcc,skb,p);
- VCC2FLOW(vcc)->old_pop(vcc,skb);
+ D2PRINTK("sch_atm_pop(vcc %p,skb %p,[qdisc %p])\n", vcc, skb, p);
+ VCC2FLOW(vcc)->old_pop(vcc, skb);
tasklet_schedule(&p->task);
}
static const u8 llc_oui_ip[] = {
- 0xaa, /* DSAP: non-ISO */
- 0xaa, /* SSAP: non-ISO */
- 0x03, /* Ctrl: Unnumbered Information Command PDU */
- 0x00, /* OUI: EtherType */
+ 0xaa, /* DSAP: non-ISO */
+ 0xaa, /* SSAP: non-ISO */
+ 0x03, /* Ctrl: Unnumbered Information Command PDU */
+ 0x00, /* OUI: EtherType */
0x00, 0x00,
- 0x08, 0x00 }; /* Ethertype IP (0800) */
+ 0x08, 0x00
+}; /* Ethertype IP (0800) */
static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
- struct rtattr **tca, unsigned long *arg)
+ struct rtattr **tca, unsigned long *arg)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) *arg;
+ struct atm_flow_data *flow = (struct atm_flow_data *)*arg;
struct atm_flow_data *excess = NULL;
- struct rtattr *opt = tca[TCA_OPTIONS-1];
+ struct rtattr *opt = tca[TCA_OPTIONS - 1];
struct rtattr *tb[TCA_ATM_MAX];
struct socket *sock;
- int fd,error,hdr_len;
+ int fd, error, hdr_len;
void *hdr;
DPRINTK("atm_tc_change(sch %p,[qdisc %p],classid %x,parent %x,"
- "flow %p,opt %p)\n",sch,p,classid,parent,flow,opt);
+ "flow %p,opt %p)\n", sch, p, classid, parent, flow, opt);
/*
* The concept of parents doesn't apply for this qdisc.
*/
@@ -241,33 +234,36 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
* class needs to be removed and a new one added. (This may be changed
* later.)
*/
- if (flow) return -EBUSY;
+ if (flow)
+ return -EBUSY;
if (opt == NULL || rtattr_parse_nested(tb, TCA_ATM_MAX, opt))
return -EINVAL;
- if (!tb[TCA_ATM_FD-1] || RTA_PAYLOAD(tb[TCA_ATM_FD-1]) < sizeof(fd))
+ if (!tb[TCA_ATM_FD - 1] || RTA_PAYLOAD(tb[TCA_ATM_FD - 1]) < sizeof(fd))
return -EINVAL;
- fd = *(int *) RTA_DATA(tb[TCA_ATM_FD-1]);
- DPRINTK("atm_tc_change: fd %d\n",fd);
- if (tb[TCA_ATM_HDR-1]) {
- hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR-1]);
- hdr = RTA_DATA(tb[TCA_ATM_HDR-1]);
- }
- else {
+ fd = *(int *)RTA_DATA(tb[TCA_ATM_FD - 1]);
+ DPRINTK("atm_tc_change: fd %d\n", fd);
+ if (tb[TCA_ATM_HDR - 1]) {
+ hdr_len = RTA_PAYLOAD(tb[TCA_ATM_HDR - 1]);
+ hdr = RTA_DATA(tb[TCA_ATM_HDR - 1]);
+ } else {
hdr_len = RFC1483LLC_LEN;
- hdr = NULL; /* default LLC/SNAP for IP */
+ hdr = NULL; /* default LLC/SNAP for IP */
}
- if (!tb[TCA_ATM_EXCESS-1]) excess = NULL;
+ if (!tb[TCA_ATM_EXCESS - 1])
+ excess = NULL;
else {
- if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS-1]) != sizeof(u32))
+ if (RTA_PAYLOAD(tb[TCA_ATM_EXCESS - 1]) != sizeof(u32))
return -EINVAL;
- excess = (struct atm_flow_data *) atm_tc_get(sch,
- *(u32 *) RTA_DATA(tb[TCA_ATM_EXCESS-1]));
- if (!excess) return -ENOENT;
+ excess = (struct atm_flow_data *)
+ atm_tc_get(sch, *(u32 *)RTA_DATA(tb[TCA_ATM_EXCESS - 1]));
+ if (!excess)
+ return -ENOENT;
}
DPRINTK("atm_tc_change: type %d, payload %d, hdr_len %d\n",
- opt->rta_type,RTA_PAYLOAD(opt),hdr_len);
- if (!(sock = sockfd_lookup(fd,&error))) return error; /* f_count++ */
- DPRINTK("atm_tc_change: f_count %d\n",file_count(sock->file));
+ opt->rta_type, RTA_PAYLOAD(opt), hdr_len);
+ if (!(sock = sockfd_lookup(fd, &error)))
+ return error; /* f_count++ */
+ DPRINTK("atm_tc_change: f_count %d\n", file_count(sock->file));
if (sock->ops->family != PF_ATMSVC && sock->ops->family != PF_ATMPVC) {
error = -EPROTOTYPE;
goto err_out;
@@ -280,37 +276,37 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
error = -EINVAL;
goto err_out;
}
- if (find_flow(p,flow)) {
+ if (find_flow(p, flow)) {
error = -EEXIST;
goto err_out;
}
- }
- else {
+ } else {
int i;
unsigned long cl;
for (i = 1; i < 0x8000; i++) {
- classid = TC_H_MAKE(sch->handle,0x8000 | i);
- if (!(cl = atm_tc_get(sch,classid))) break;
- atm_tc_put(sch,cl);
+ classid = TC_H_MAKE(sch->handle, 0x8000 | i);
+ if (!(cl = atm_tc_get(sch, classid)))
+ break;
+ atm_tc_put(sch, cl);
}
}
- DPRINTK("atm_tc_change: new id %x\n",classid);
- flow = kmalloc(sizeof(struct atm_flow_data)+hdr_len,GFP_KERNEL);
- DPRINTK("atm_tc_change: flow %p\n",flow);
+ DPRINTK("atm_tc_change: new id %x\n", classid);
+ flow = kmalloc(sizeof(struct atm_flow_data) + hdr_len, GFP_KERNEL);
+ DPRINTK("atm_tc_change: flow %p\n", flow);
if (!flow) {
error = -ENOBUFS;
goto err_out;
}
- memset(flow,0,sizeof(*flow));
+ memset(flow, 0, sizeof(*flow));
flow->filter_list = NULL;
- if (!(flow->q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,classid)))
+ if (!(flow->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid)))
flow->q = &noop_qdisc;
- DPRINTK("atm_tc_change: qdisc %p\n",flow->q);
+ DPRINTK("atm_tc_change: qdisc %p\n", flow->q);
flow->sock = sock;
- flow->vcc = ATM_SD(sock); /* speedup */
+ flow->vcc = ATM_SD(sock); /* speedup */
flow->vcc->user_back = flow;
- DPRINTK("atm_tc_change: vcc %p\n",flow->vcc);
+ DPRINTK("atm_tc_change: vcc %p\n", flow->vcc);
flow->old_pop = flow->vcc->pop;
flow->parent = p;
flow->vcc->pop = sch_atm_pop;
@@ -321,50 +317,53 @@ static int atm_tc_change(struct Qdisc *sch, u32 classid, u32 parent,
p->link.next = flow;
flow->hdr_len = hdr_len;
if (hdr)
- memcpy(flow->hdr,hdr,hdr_len);
+ memcpy(flow->hdr, hdr, hdr_len);
else
- memcpy(flow->hdr,llc_oui_ip,sizeof(llc_oui_ip));
- *arg = (unsigned long) flow;
+ memcpy(flow->hdr, llc_oui_ip, sizeof(llc_oui_ip));
+ *arg = (unsigned long)flow;
return 0;
err_out:
- if (excess) atm_tc_put(sch,(unsigned long) excess);
+ if (excess)
+ atm_tc_put(sch, (unsigned long)excess);
sockfd_put(sock);
return error;
}
-
-static int atm_tc_delete(struct Qdisc *sch,unsigned long arg)
+static int atm_tc_delete(struct Qdisc *sch, unsigned long arg)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) arg;
+ struct atm_flow_data *flow = (struct atm_flow_data *)arg;
- DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n",sch,p,flow);
- if (!find_flow(PRIV(sch),flow)) return -EINVAL;
- if (flow->filter_list || flow == &p->link) return -EBUSY;
+ DPRINTK("atm_tc_delete(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
+ if (!find_flow(PRIV(sch), flow))
+ return -EINVAL;
+ if (flow->filter_list || flow == &p->link)
+ return -EBUSY;
/*
* Reference count must be 2: one for "keepalive" (set at class
* creation), and one for the reference held when calling delete.
*/
if (flow->ref < 2) {
- printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n",flow->ref);
+ printk(KERN_ERR "atm_tc_delete: flow->ref == %d\n", flow->ref);
return -EINVAL;
}
- if (flow->ref > 2) return -EBUSY; /* catch references via excess, etc.*/
- atm_tc_put(sch,arg);
+ if (flow->ref > 2)
+ return -EBUSY; /* catch references via excess, etc. */
+ atm_tc_put(sch, arg);
return 0;
}
-
-static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
+static void atm_tc_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker);
- if (walker->stop) return;
+ DPRINTK("atm_tc_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);
+ if (walker->stop)
+ return;
for (flow = p->flows; flow; flow = flow->next) {
if (walker->count >= walker->skip)
- if (walker->fn(sch,(unsigned long) flow,walker) < 0) {
+ if (walker->fn(sch, (unsigned long)flow, walker) < 0) {
walker->stop = 1;
break;
}
@@ -372,73 +371,71 @@ static void atm_tc_walk(struct Qdisc *sch,struct qdisc_walker *walker)
}
}
-
-static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch,unsigned long cl)
+static struct tcf_proto **atm_tc_find_tcf(struct Qdisc *sch, unsigned long cl)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) cl;
+ struct atm_flow_data *flow = (struct atm_flow_data *)cl;
- DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n",sch,p,flow);
+ DPRINTK("atm_tc_find_tcf(sch %p,[qdisc %p],flow %p)\n", sch, p, flow);
return flow ? &flow->filter_list : &p->link.filter_list;
}
-
/* --------------------------- Qdisc operations ---------------------------- */
-
-static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = NULL ; /* @@@ */
+ struct atm_flow_data *flow = NULL; /* @@@ */
struct tcf_result res;
int result;
int ret = NET_XMIT_POLICED;
- D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
- result = TC_POLICE_OK; /* be nice to gcc */
+ D2PRINTK("atm_tc_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+ result = TC_POLICE_OK; /* be nice to gcc */
if (TC_H_MAJ(skb->priority) != sch->handle ||
- !(flow = (struct atm_flow_data *) atm_tc_get(sch,skb->priority)))
+ !(flow = (struct atm_flow_data *)atm_tc_get(sch, skb->priority)))
for (flow = p->flows; flow; flow = flow->next)
if (flow->filter_list) {
- result = tc_classify(skb,flow->filter_list,
- &res);
- if (result < 0) continue;
- flow = (struct atm_flow_data *) res.class;
- if (!flow) flow = lookup_flow(sch,res.classid);
+ result = tc_classify_compat(skb,
+ flow->filter_list,
+ &res);
+ if (result < 0)
+ continue;
+ flow = (struct atm_flow_data *)res.class;
+ if (!flow)
+ flow = lookup_flow(sch, res.classid);
break;
}
- if (!flow) flow = &p->link;
+ if (!flow)
+ flow = &p->link;
else {
if (flow->vcc)
ATM_SKB(skb)->atm_options = flow->vcc->atm_options;
- /*@@@ looks good ... but it's not supposed to work :-)*/
-#ifdef CONFIG_NET_CLS_POLICE
+ /*@@@ looks good ... but it's not supposed to work :-) */
+#ifdef CONFIG_NET_CLS_ACT
switch (result) {
- case TC_POLICE_SHOT:
- kfree_skb(skb);
- break;
- case TC_POLICE_RECLASSIFY:
- if (flow->excess) flow = flow->excess;
- else {
- ATM_SKB(skb)->atm_options |=
- ATM_ATMOPT_CLP;
- break;
- }
- /* fall through */
- case TC_POLICE_OK:
- /* fall through */
- default:
- break;
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ case TC_ACT_SHOT:
+ kfree_skb(skb);
+ goto drop;
+ case TC_POLICE_RECLASSIFY:
+ if (flow->excess)
+ flow = flow->excess;
+ else
+ ATM_SKB(skb)->atm_options |= ATM_ATMOPT_CLP;
+ break;
}
#endif
}
- if (
-#ifdef CONFIG_NET_CLS_POLICE
- result == TC_POLICE_SHOT ||
-#endif
- (ret = flow->q->enqueue(skb,flow->q)) != 0) {
+
+ if ((ret = flow->q->enqueue(skb, flow->q)) != 0) {
+drop: __maybe_unused
sch->qstats.drops++;
- if (flow) flow->qstats.drops++;
+ if (flow)
+ flow->qstats.drops++;
return ret;
}
sch->bstats.bytes += skb->len;
@@ -462,7 +459,6 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
return NET_XMIT_BYPASS;
}
-
/*
* Dequeue packets and send them over ATM. Note that we quite deliberately
* avoid checking net_device's flow control here, simply because sch_atm
@@ -470,166 +466,163 @@ static int atm_tc_enqueue(struct sk_buff *skb,struct Qdisc *sch)
* non-ATM interfaces.
*/
-
static void sch_atm_dequeue(unsigned long data)
{
- struct Qdisc *sch = (struct Qdisc *) data;
+ struct Qdisc *sch = (struct Qdisc *)data;
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
struct sk_buff *skb;
- D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n",sch,p);
+ D2PRINTK("sch_atm_dequeue(sch %p,[qdisc %p])\n", sch, p);
for (flow = p->link.next; flow; flow = flow->next)
/*
* If traffic is properly shaped, this won't generate nasty
* little bursts. Otherwise, it may ... (but that's okay)
*/
while ((skb = flow->q->dequeue(flow->q))) {
- if (!atm_may_send(flow->vcc,skb->truesize)) {
- (void) flow->q->ops->requeue(skb,flow->q);
+ if (!atm_may_send(flow->vcc, skb->truesize)) {
+ (void)flow->q->ops->requeue(skb, flow->q);
break;
}
- D2PRINTK("atm_tc_dequeue: sending on class %p\n",flow);
+ D2PRINTK("atm_tc_dequeue: sending on class %p\n", flow);
/* remove any LL header somebody else has attached */
skb_pull(skb, skb_network_offset(skb));
if (skb_headroom(skb) < flow->hdr_len) {
struct sk_buff *new;
- new = skb_realloc_headroom(skb,flow->hdr_len);
+ new = skb_realloc_headroom(skb, flow->hdr_len);
dev_kfree_skb(skb);
- if (!new) continue;
+ if (!new)
+ continue;
skb = new;
}
D2PRINTK("sch_atm_dequeue: ip %p, data %p\n",
skb_network_header(skb), skb->data);
ATM_SKB(skb)->vcc = flow->vcc;
- memcpy(skb_push(skb,flow->hdr_len),flow->hdr,
- flow->hdr_len);
+ memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
+ flow->hdr_len);
atomic_add(skb->truesize,
&sk_atm(flow->vcc)->sk_wmem_alloc);
/* atm.atm_options are already set by atm_tc_enqueue */
- (void) flow->vcc->send(flow->vcc,skb);
+ flow->vcc->send(flow->vcc, skb);
}
}
-
static struct sk_buff *atm_tc_dequeue(struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
struct sk_buff *skb;
- D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n",sch,p);
+ D2PRINTK("atm_tc_dequeue(sch %p,[qdisc %p])\n", sch, p);
tasklet_schedule(&p->task);
skb = p->link.q->dequeue(p->link.q);
- if (skb) sch->q.qlen--;
+ if (skb)
+ sch->q.qlen--;
return skb;
}
-
-static int atm_tc_requeue(struct sk_buff *skb,struct Qdisc *sch)
+static int atm_tc_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
int ret;
- D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
- ret = p->link.q->ops->requeue(skb,p->link.q);
+ D2PRINTK("atm_tc_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);
+ ret = p->link.q->ops->requeue(skb, p->link.q);
if (!ret) {
- sch->q.qlen++;
- sch->qstats.requeues++;
- } else {
+ sch->q.qlen++;
+ sch->qstats.requeues++;
+ } else {
sch->qstats.drops++;
p->link.qstats.drops++;
}
return ret;
}
-
static unsigned int atm_tc_drop(struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
unsigned int len;
- DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n",sch,p);
+ DPRINTK("atm_tc_drop(sch %p,[qdisc %p])\n", sch, p);
for (flow = p->flows; flow; flow = flow->next)
if (flow->q->ops->drop && (len = flow->q->ops->drop(flow->q)))
return len;
return 0;
}
-
-static int atm_tc_init(struct Qdisc *sch,struct rtattr *opt)
+static int atm_tc_init(struct Qdisc *sch, struct rtattr *opt)
{
struct atm_qdisc_data *p = PRIV(sch);
- DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
+ DPRINTK("atm_tc_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);
p->flows = &p->link;
- if(!(p->link.q = qdisc_create_dflt(sch->dev,&pfifo_qdisc_ops,
- sch->handle)))
+ if (!(p->link.q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops,
+ sch->handle)))
p->link.q = &noop_qdisc;
- DPRINTK("atm_tc_init: link (%p) qdisc %p\n",&p->link,p->link.q);
+ DPRINTK("atm_tc_init: link (%p) qdisc %p\n", &p->link, p->link.q);
p->link.filter_list = NULL;
p->link.vcc = NULL;
p->link.sock = NULL;
p->link.classid = sch->handle;
p->link.ref = 1;
p->link.next = NULL;
- tasklet_init(&p->task,sch_atm_dequeue,(unsigned long) sch);
+ tasklet_init(&p->task, sch_atm_dequeue, (unsigned long)sch);
return 0;
}
-
static void atm_tc_reset(struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n",sch,p);
- for (flow = p->flows; flow; flow = flow->next) qdisc_reset(flow->q);
+ DPRINTK("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
+ for (flow = p->flows; flow; flow = flow->next)
+ qdisc_reset(flow->q);
sch->q.qlen = 0;
}
-
static void atm_tc_destroy(struct Qdisc *sch)
{
struct atm_qdisc_data *p = PRIV(sch);
struct atm_flow_data *flow;
- DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n",sch,p);
+ DPRINTK("atm_tc_destroy(sch %p,[qdisc %p])\n", sch, p);
/* races ? */
while ((flow = p->flows)) {
tcf_destroy_chain(flow->filter_list);
+ flow->filter_list = NULL;
if (flow->ref > 1)
- printk(KERN_ERR "atm_destroy: %p->ref = %d\n",flow,
- flow->ref);
- atm_tc_put(sch,(unsigned long) flow);
+ printk(KERN_ERR "atm_destroy: %p->ref = %d\n", flow,
+ flow->ref);
+ atm_tc_put(sch, (unsigned long)flow);
if (p->flows == flow) {
printk(KERN_ERR "atm_destroy: putting flow %p didn't "
- "kill it\n",flow);
- p->flows = flow->next; /* brute force */
+ "kill it\n", flow);
+ p->flows = flow->next; /* brute force */
break;
}
}
tasklet_kill(&p->task);
}
-
static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
- struct sk_buff *skb, struct tcmsg *tcm)
+ struct sk_buff *skb, struct tcmsg *tcm)
{
struct atm_qdisc_data *p = PRIV(sch);
- struct atm_flow_data *flow = (struct atm_flow_data *) cl;
+ struct atm_flow_data *flow = (struct atm_flow_data *)cl;
unsigned char *b = skb_tail_pointer(skb);
struct rtattr *rta;
DPRINTK("atm_tc_dump_class(sch %p,[qdisc %p],flow %p,skb %p,tcm %p)\n",
- sch,p,flow,skb,tcm);
- if (!find_flow(p,flow)) return -EINVAL;
+ sch, p, flow, skb, tcm);
+ if (!find_flow(p, flow))
+ return -EINVAL;
tcm->tcm_handle = flow->classid;
tcm->tcm_info = flow->q->handle;
- rta = (struct rtattr *) b;
- RTA_PUT(skb,TCA_OPTIONS,0,NULL);
- RTA_PUT(skb,TCA_ATM_HDR,flow->hdr_len,flow->hdr);
+ rta = (struct rtattr *)b;
+ RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
+ RTA_PUT(skb, TCA_ATM_HDR, flow->hdr_len, flow->hdr);
if (flow->vcc) {
struct sockaddr_atmpvc pvc;
int state;
@@ -638,16 +631,16 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl,
pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1;
pvc.sap_addr.vpi = flow->vcc->vpi;
pvc.sap_addr.vci = flow->vcc->vci;
- RTA_PUT(skb,TCA_ATM_ADDR,sizeof(pvc),&pvc);
+ RTA_PUT(skb, TCA_ATM_ADDR, sizeof(pvc), &pvc);
state = ATM_VF2VS(flow->vcc->flags);
- RTA_PUT(skb,TCA_ATM_STATE,sizeof(state),&state);
+ RTA_PUT(skb, TCA_ATM_STATE, sizeof(state), &state);
}
if (flow->excess)
- RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(u32),&flow->classid);
+ RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(u32), &flow->classid);
else {
static u32 zero;
- RTA_PUT(skb,TCA_ATM_EXCESS,sizeof(zero),&zero);
+ RTA_PUT(skb, TCA_ATM_EXCESS, sizeof(zero), &zero);
}
rta->rta_len = skb_tail_pointer(skb) - b;
return skb->len;
@@ -658,9 +651,9 @@ rtattr_failure:
}
static int
atm_tc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
- struct gnet_dump *d)
+ struct gnet_dump *d)
{
- struct atm_flow_data *flow = (struct atm_flow_data *) arg;
+ struct atm_flow_data *flow = (struct atm_flow_data *)arg;
flow->qstats.qlen = flow->q->q.qlen;
@@ -677,38 +670,35 @@ static int atm_tc_dump(struct Qdisc *sch, struct sk_buff *skb)
}
static struct Qdisc_class_ops atm_class_ops = {
- .graft = atm_tc_graft,
- .leaf = atm_tc_leaf,
- .get = atm_tc_get,
- .put = atm_tc_put,
- .change = atm_tc_change,
- .delete = atm_tc_delete,
- .walk = atm_tc_walk,
- .tcf_chain = atm_tc_find_tcf,
- .bind_tcf = atm_tc_bind_filter,
- .unbind_tcf = atm_tc_put,
- .dump = atm_tc_dump_class,
- .dump_stats = atm_tc_dump_class_stats,
+ .graft = atm_tc_graft,
+ .leaf = atm_tc_leaf,
+ .get = atm_tc_get,
+ .put = atm_tc_put,
+ .change = atm_tc_change,
+ .delete = atm_tc_delete,
+ .walk = atm_tc_walk,
+ .tcf_chain = atm_tc_find_tcf,
+ .bind_tcf = atm_tc_bind_filter,
+ .unbind_tcf = atm_tc_put,
+ .dump = atm_tc_dump_class,
+ .dump_stats = atm_tc_dump_class_stats,
};
static struct Qdisc_ops atm_qdisc_ops = {
- .next = NULL,
- .cl_ops = &atm_class_ops,
- .id = "atm",
- .priv_size = sizeof(struct atm_qdisc_data),
- .enqueue = atm_tc_enqueue,
- .dequeue = atm_tc_dequeue,
- .requeue = atm_tc_requeue,
- .drop = atm_tc_drop,
- .init = atm_tc_init,
- .reset = atm_tc_reset,
- .destroy = atm_tc_destroy,
- .change = NULL,
- .dump = atm_tc_dump,
- .owner = THIS_MODULE,
+ .cl_ops = &atm_class_ops,
+ .id = "atm",
+ .priv_size = sizeof(struct atm_qdisc_data),
+ .enqueue = atm_tc_enqueue,
+ .dequeue = atm_tc_dequeue,
+ .requeue = atm_tc_requeue,
+ .drop = atm_tc_drop,
+ .init = atm_tc_init,
+ .reset = atm_tc_reset,
+ .destroy = atm_tc_destroy,
+ .dump = atm_tc_dump,
+ .owner = THIS_MODULE,
};
-
static int __init atm_init(void)
{
return register_qdisc(&atm_qdisc_ops);
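
The ATM qdisc splits its work in two: atm_tc_dequeue() above only drains the pseudo "link" queue and schedules p->task, while sch_atm_dequeue() runs later as a tasklet and pushes per-flow packets straight to their VCCs. The fragment below is a minimal sketch of that schedule-and-drain pattern only, not the sch_atm code itself; everything with a my_ prefix is hypothetical and the kfree_skb() merely stands in for vcc->send().

#include <linux/interrupt.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

struct my_sched_data {
	struct Qdisc		*link_q;	/* what ->dequeue() hands to the stack */
	struct Qdisc		*flow_q;	/* drained later from the tasklet */
	struct tasklet_struct	task;
};

static void my_drain(unsigned long data)
{
	struct Qdisc *sch = (struct Qdisc *)data;
	struct my_sched_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	/* Softirq context, after ->dequeue() has already returned. */
	while ((skb = p->flow_q->dequeue(p->flow_q)))
		kfree_skb(skb);			/* stand-in for vcc->send() */
}

static struct sk_buff *my_dequeue(struct Qdisc *sch)
{
	struct my_sched_data *p = qdisc_priv(sch);
	struct sk_buff *skb;

	tasklet_schedule(&p->task);		/* defer the per-flow work */
	skb = p->link_q->dequeue(p->link_q);
	if (skb)
		sch->q.qlen--;
	return skb;
}

static int my_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct my_sched_data *p = qdisc_priv(sch);

	p->link_q = p->flow_q = &noop_qdisc;
	tasklet_init(&p->task, my_drain, (unsigned long)sch);
	return 0;
}

static void my_destroy(struct Qdisc *sch)
{
	struct my_sched_data *p = qdisc_priv(sch);

	tasklet_kill(&p->task);			/* mirrors atm_tc_destroy() */
}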
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index cb0c456aa349..f914fc43a124 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index a294542cb8e4..e38c2839b25c 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -11,28 +11,12 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -98,7 +82,7 @@ struct cbq_class
unsigned char priority2; /* priority to be used after overlimit */
unsigned char ewma_log; /* time constant for idle time calculation */
unsigned char ovl_strategy;
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
unsigned char police;
#endif
@@ -148,7 +132,6 @@ struct cbq_class
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
- spinlock_t *stats_lock;
struct tc_cbq_xstats xstats;
struct tcf_proto *filter_list;
@@ -171,7 +154,7 @@ struct cbq_sched_data
struct cbq_class *active[TC_CBQ_MAXPRIO+1]; /* List of all classes
with backlog */
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
struct cbq_class *rx_class;
#endif
struct cbq_class *tx_class;
@@ -213,7 +196,7 @@ cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
return NULL;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
@@ -264,7 +247,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
/*
* Step 2+n. Apply classifier.
*/
- if (!head->filter_list || (result = tc_classify(skb, head->filter_list, &res)) < 0)
+ if (!head->filter_list ||
+ (result = tc_classify_compat(skb, head->filter_list, &res)) < 0)
goto fallback;
if ((cl = (void*)res.class) == NULL) {
@@ -284,15 +268,8 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
*qerr = NET_XMIT_SUCCESS;
case TC_ACT_SHOT:
return NULL;
- }
-#elif defined(CONFIG_NET_CLS_POLICE)
- switch (result) {
- case TC_POLICE_RECLASSIFY:
+ case TC_ACT_RECLASSIFY:
return cbq_reclassify(skb, cl);
- case TC_POLICE_SHOT:
- return NULL;
- default:
- break;
}
#endif
if (cl->level == 0)
@@ -406,7 +383,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
int ret;
struct cbq_class *cl = cbq_classify(skb, sch, &ret);
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
q->rx_class = cl;
#endif
if (cl == NULL) {
@@ -416,7 +393,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return ret;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
cl->q->__parent = sch;
#endif
if ((ret = cl->q->enqueue(skb, cl->q)) == NET_XMIT_SUCCESS) {
@@ -451,7 +428,7 @@ cbq_requeue(struct sk_buff *skb, struct Qdisc *sch)
cbq_mark_toplevel(q, cl);
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
q->rx_class = cl;
cl->q->__parent = sch;
#endif
@@ -686,9 +663,7 @@ static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
return HRTIMER_NORESTART;
}
-
-#ifdef CONFIG_NET_CLS_POLICE
-
+#ifdef CONFIG_NET_CLS_ACT
static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
{
int len = skb->len;
@@ -1381,7 +1356,7 @@ static int cbq_set_overlimit(struct cbq_class *cl, struct tc_cbq_ovl *ovl)
return 0;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
static int cbq_set_police(struct cbq_class *cl, struct tc_cbq_police *p)
{
cl->police = p->police;
@@ -1442,7 +1417,6 @@ static int cbq_init(struct Qdisc *sch, struct rtattr *opt)
q->link.ewma_log = TC_CBQ_DEF_EWMA;
q->link.avpkt = q->link.allot/2;
q->link.minidle = -0x7FFFFFFF;
- q->link.stats_lock = &sch->dev->queue_lock;
qdisc_watchdog_init(&q->watchdog, sch);
hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -1550,7 +1524,7 @@ rtattr_failure:
return -1;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
static __inline__ int cbq_dump_police(struct sk_buff *skb, struct cbq_class *cl)
{
unsigned char *b = skb_tail_pointer(skb);
@@ -1576,7 +1550,7 @@ static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
cbq_dump_rate(skb, cl) < 0 ||
cbq_dump_wrr(skb, cl) < 0 ||
cbq_dump_ovl(skb, cl) < 0 ||
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
cbq_dump_police(skb, cl) < 0 ||
#endif
cbq_dump_fopt(skb, cl) < 0)
@@ -1653,9 +1627,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
cl->xstats.undertime = cl->undertime - q->now;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
@@ -1673,7 +1645,7 @@ static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
cl->classid)) == NULL)
return -ENOBUFS;
} else {
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (cl->police == TC_POLICE_RECLASSIFY)
new->reshape_fail = cbq_reshape_fail;
#endif
@@ -1726,9 +1698,7 @@ static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
tcf_destroy_chain(cl->filter_list);
qdisc_destroy(cl->q);
qdisc_put_rtab(cl->R_tab);
-#ifdef CONFIG_NET_ESTIMATOR
gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
if (cl != &q->link)
kfree(cl);
}
@@ -1740,7 +1710,7 @@ cbq_destroy(struct Qdisc* sch)
struct cbq_class *cl;
unsigned h;
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
q->rx_class = NULL;
#endif
/*
@@ -1748,10 +1718,12 @@ cbq_destroy(struct Qdisc* sch)
* classes from root to leafs which means that filters can still
* be bound to classes which have been destroyed already. --TGR '04
*/
- for (h = 0; h < 16; h++)
- for (cl = q->classes[h]; cl; cl = cl->next)
+ for (h = 0; h < 16; h++) {
+ for (cl = q->classes[h]; cl; cl = cl->next) {
tcf_destroy_chain(cl->filter_list);
-
+ cl->filter_list = NULL;
+ }
+ }
for (h = 0; h < 16; h++) {
struct cbq_class *next;
@@ -1767,7 +1739,7 @@ static void cbq_put(struct Qdisc *sch, unsigned long arg)
struct cbq_class *cl = (struct cbq_class*)arg;
if (--cl->refcnt == 0) {
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
struct cbq_sched_data *q = qdisc_priv(sch);
spin_lock_bh(&sch->dev->queue_lock);
@@ -1815,7 +1787,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
RTA_PAYLOAD(tb[TCA_CBQ_WRROPT-1]) < sizeof(struct tc_cbq_wrropt))
return -EINVAL;
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE-1] &&
RTA_PAYLOAD(tb[TCA_CBQ_POLICE-1]) < sizeof(struct tc_cbq_police))
return -EINVAL;
@@ -1858,7 +1830,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
if (tb[TCA_CBQ_OVL_STRATEGY-1])
cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE-1])
cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
@@ -1871,11 +1843,10 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
sch_tree_unlock(sch);
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- cl->stats_lock, tca[TCA_RATE-1]);
-#endif
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1]);
return 0;
}
@@ -1933,7 +1904,6 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cl->allot = parent->allot;
cl->quantum = cl->allot;
cl->weight = cl->R_tab->rate.rate;
- cl->stats_lock = &sch->dev->queue_lock;
sch_tree_lock(sch);
cbq_link_class(cl);
@@ -1953,7 +1923,7 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cl->overlimit = cbq_ovl_classic;
if (tb[TCA_CBQ_OVL_STRATEGY-1])
cbq_set_overlimit(cl, RTA_DATA(tb[TCA_CBQ_OVL_STRATEGY-1]));
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (tb[TCA_CBQ_POLICE-1])
cbq_set_police(cl, RTA_DATA(tb[TCA_CBQ_POLICE-1]));
#endif
@@ -1961,11 +1931,9 @@ cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct rtattr **t
cbq_set_fopt(cl, RTA_DATA(tb[TCA_CBQ_FOPT-1]));
sch_tree_unlock(sch);
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- cl->stats_lock, tca[TCA_RATE-1]);
-#endif
+ &sch->dev->queue_lock, tca[TCA_RATE-1]);
*arg = (unsigned long)cl;
return 0;
@@ -1999,7 +1967,7 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
q->tx_class = NULL;
q->tx_borrowed = NULL;
}
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (q->rx_class == cl)
q->rx_class = NULL;
#endif
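
With TC_POLICE_* gone, the classify paths in cbq, dsmark, hfsc and htb all reduce to the same shape: run tc_classify_compat() and switch on the TC_ACT_* verdict, with TC_ACT_RECLASSIFY taking over the role of the old police reclassify result. A hedged sketch of that shared pattern follows; the helper name and its return convention are illustrative, not taken from this patch.

#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

/*
 * Illustrative only: classify skb against a filter chain and return the
 * class cookie, or 0 when an action handled the packet.  *qerr tells the
 * caller which NET_XMIT_* code to propagate in that case.
 */
static unsigned long my_classify(struct sk_buff *skb, struct tcf_proto *fl,
				 int *qerr)
{
	struct tcf_result res;
	int result;

	*qerr = NET_XMIT_BYPASS;
	if (fl == NULL)
		return 0;

	result = tc_classify_compat(skb, fl, &res);
	if (result < 0)
		return 0;			/* no filter matched */

#ifdef CONFIG_NET_CLS_ACT
	switch (result) {
	case TC_ACT_QUEUED:
	case TC_ACT_STOLEN:
		*qerr = NET_XMIT_SUCCESS;	/* packet handled by an action */
		/* fall through */
	case TC_ACT_SHOT:
		return 0;
	case TC_ACT_RECLASSIFY:
		/* e.g. cbq_reclassify(): restart from a parent class */
		break;
	}
#endif
	return (unsigned long)res.class;	/* class cookie for the qdisc */
}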
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 3c6fd181263f..60f89199e3da 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -9,7 +9,6 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
-#include <linux/netdevice.h> /* for pkt_sched */
#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
@@ -238,25 +237,23 @@ static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
D2PRINTK("result %d class 0x%04x\n", result, res.classid);
switch (result) {
-#ifdef CONFIG_NET_CLS_POLICE
- case TC_POLICE_SHOT:
- kfree_skb(skb);
- sch->qstats.drops++;
- return NET_XMIT_POLICED;
-#if 0
- case TC_POLICE_RECLASSIFY:
- /* FIXME: what to do here ??? */
+#ifdef CONFIG_NET_CLS_ACT
+ case TC_ACT_QUEUED:
+ case TC_ACT_STOLEN:
+ kfree_skb(skb);
+ return NET_XMIT_SUCCESS;
+ case TC_ACT_SHOT:
+ kfree_skb(skb);
+ sch->qstats.drops++;
+ return NET_XMIT_BYPASS;
#endif
-#endif
- case TC_POLICE_OK:
- skb->tc_index = TC_H_MIN(res.classid);
- break;
- case TC_POLICE_UNSPEC:
- /* fall through */
- default:
- if (p->default_index != NO_DEFAULT_INDEX)
- skb->tc_index = p->default_index;
- break;
+ case TC_ACT_OK:
+ skb->tc_index = TC_H_MIN(res.classid);
+ break;
+ default:
+ if (p->default_index != NO_DEFAULT_INDEX)
+ skb->tc_index = p->default_index;
+ break;
}
}
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index c2689f4ba8de..c264308f17c1 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -13,7 +13,6 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index f28bb2dc58d0..c81649cf0b9e 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -11,27 +11,19 @@
* - Ingress support
*/
-#include <asm/uaccess.h>
-#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
/* Main transmission queue. */
@@ -59,122 +51,143 @@ void qdisc_unlock_tree(struct net_device *dev)
spin_unlock_bh(&dev->queue_lock);
}
-/*
- dev->queue_lock serializes queue accesses for this device
- AND dev->qdisc pointer itself.
+static inline int qdisc_qlen(struct Qdisc *q)
+{
+ return q->q.qlen;
+}
- netif_tx_lock serializes accesses to device driver.
+static inline int dev_requeue_skb(struct sk_buff *skb, struct net_device *dev,
+ struct Qdisc *q)
+{
+ if (unlikely(skb->next))
+ dev->gso_skb = skb;
+ else
+ q->ops->requeue(skb, q);
- dev->queue_lock and netif_tx_lock are mutually exclusive,
- if one is grabbed, another must be free.
- */
+ netif_schedule(dev);
+ return 0;
+}
+
+static inline struct sk_buff *dev_dequeue_skb(struct net_device *dev,
+ struct Qdisc *q)
+{
+ struct sk_buff *skb;
+
+ if ((skb = dev->gso_skb))
+ dev->gso_skb = NULL;
+ else
+ skb = q->dequeue(q);
+ return skb;
+}
-/* Kick device.
+static inline int handle_dev_cpu_collision(struct sk_buff *skb,
+ struct net_device *dev,
+ struct Qdisc *q)
+{
+ int ret;
- Returns: 0 - queue is empty or throttled.
- >0 - queue is not empty.
+ if (unlikely(dev->xmit_lock_owner == smp_processor_id())) {
+ /*
+ * Same CPU holding the lock. It may be a transient
+ * configuration error, when hard_start_xmit() recurses. We
+ * detect it by checking xmit owner and drop the packet when
+ * deadloop is detected. Return OK to try the next skb.
+ */
+ kfree_skb(skb);
+ if (net_ratelimit())
+ printk(KERN_WARNING "Dead loop on netdevice %s, "
+ "fix it urgently!\n", dev->name);
+ ret = qdisc_qlen(q);
+ } else {
+ /*
+ * Another cpu is holding lock, requeue & delay xmits for
+ * some time.
+ */
+ __get_cpu_var(netdev_rx_stat).cpu_collision++;
+ ret = dev_requeue_skb(skb, dev, q);
+ }
- NOTE: Called under dev->queue_lock with locally disabled BH.
-*/
+ return ret;
+}
+/*
+ * NOTE: Called under dev->queue_lock with locally disabled BH.
+ *
+ * __LINK_STATE_QDISC_RUNNING guarantees only one CPU can process this
+ * device at a time. dev->queue_lock serializes queue accesses for
+ * this device AND dev->qdisc pointer itself.
+ *
+ * netif_tx_lock serializes accesses to device driver.
+ *
+ * dev->queue_lock and netif_tx_lock are mutually exclusive,
+ * if one is grabbed, another must be free.
+ *
+ * Note, that this procedure can be called by a watchdog timer
+ *
+ * Returns to the caller:
+ * 0 - queue is empty or throttled.
+ * >0 - queue is not empty.
+ *
+ */
static inline int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
struct sk_buff *skb;
+ unsigned lockless;
+ int ret;
/* Dequeue packet */
- if (((skb = dev->gso_skb)) || ((skb = q->dequeue(q)))) {
- unsigned nolock = (dev->features & NETIF_F_LLTX);
+ if (unlikely((skb = dev_dequeue_skb(dev, q)) == NULL))
+ return 0;
- dev->gso_skb = NULL;
+ /*
+ * When the driver has LLTX set, it does its own locking in
+ * start_xmit. These checks are worth it because even uncongested
+ * locks can be quite expensive. The driver can do a trylock, as
+ * is being done here; in case of lock contention it should return
+ * NETDEV_TX_LOCKED and the packet will be requeued.
+ */
+ lockless = (dev->features & NETIF_F_LLTX);
- /*
- * When the driver has LLTX set it does its own locking
- * in start_xmit. No need to add additional overhead by
- * locking again. These checks are worth it because
- * even uncongested locks can be quite expensive.
- * The driver can do trylock like here too, in case
- * of lock congestion it should return -1 and the packet
- * will be requeued.
- */
- if (!nolock) {
- if (!netif_tx_trylock(dev)) {
- collision:
- /* So, someone grabbed the driver. */
-
- /* It may be transient configuration error,
- when hard_start_xmit() recurses. We detect
- it by checking xmit owner and drop the
- packet when deadloop is detected.
- */
- if (dev->xmit_lock_owner == smp_processor_id()) {
- kfree_skb(skb);
- if (net_ratelimit())
- printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
- goto out;
- }
- __get_cpu_var(netdev_rx_stat).cpu_collision++;
- goto requeue;
- }
- }
+ if (!lockless && !netif_tx_trylock(dev)) {
+ /* Another CPU grabbed the driver tx lock */
+ return handle_dev_cpu_collision(skb, dev, q);
+ }
- {
- /* And release queue */
- spin_unlock(&dev->queue_lock);
-
- if (!netif_queue_stopped(dev)) {
- int ret;
-
- ret = dev_hard_start_xmit(skb, dev);
- if (ret == NETDEV_TX_OK) {
- if (!nolock) {
- netif_tx_unlock(dev);
- }
- spin_lock(&dev->queue_lock);
- q = dev->qdisc;
- goto out;
- }
- if (ret == NETDEV_TX_LOCKED && nolock) {
- spin_lock(&dev->queue_lock);
- q = dev->qdisc;
- goto collision;
- }
- }
+ /* And release queue */
+ spin_unlock(&dev->queue_lock);
- /* NETDEV_TX_BUSY - we need to requeue */
- /* Release the driver */
- if (!nolock) {
- netif_tx_unlock(dev);
- }
- spin_lock(&dev->queue_lock);
- q = dev->qdisc;
- }
+ ret = dev_hard_start_xmit(skb, dev);
- /* Device kicked us out :(
- This is possible in three cases:
+ if (!lockless)
+ netif_tx_unlock(dev);
- 0. driver is locked
- 1. fastroute is enabled
- 2. device cannot determine busy state
- before start of transmission (f.e. dialout)
- 3. device is buggy (ppp)
- */
+ spin_lock(&dev->queue_lock);
+ q = dev->qdisc;
-requeue:
- if (unlikely(q == &noop_qdisc))
- kfree_skb(skb);
- else if (skb->next)
- dev->gso_skb = skb;
- else
- q->ops->requeue(skb, q);
- netif_schedule(dev);
- return 0;
+ switch (ret) {
+ case NETDEV_TX_OK:
+ /* Driver sent out skb successfully */
+ ret = qdisc_qlen(q);
+ break;
+
+ case NETDEV_TX_LOCKED:
+ /* Driver try lock failed */
+ ret = handle_dev_cpu_collision(skb, dev, q);
+ break;
+
+ default:
+ /* Driver returned NETDEV_TX_BUSY - requeue skb */
+ if (unlikely (ret != NETDEV_TX_BUSY && net_ratelimit()))
+ printk(KERN_WARNING "BUG %s code %d qlen %d\n",
+ dev->name, ret, q->q.qlen);
+
+ ret = dev_requeue_skb(skb, dev, q);
+ break;
}
-out:
- BUG_ON((int) q->q.qlen < 0);
- return q->q.qlen;
+ return ret;
}
void __qdisc_run(struct net_device *dev)
@@ -224,7 +237,8 @@ void __netdev_watchdog_up(struct net_device *dev)
if (dev->tx_timeout) {
if (dev->watchdog_timeo <= 0)
dev->watchdog_timeo = 5*HZ;
- if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
+ if (!mod_timer(&dev->watchdog_timer,
+ round_jiffies(jiffies + dev->watchdog_timeo)))
dev_hold(dev);
}
}
@@ -492,9 +506,7 @@ void qdisc_destroy(struct Qdisc *qdisc)
return;
list_del(&qdisc->list);
-#ifdef CONFIG_NET_ESTIMATOR
gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
-#endif
if (ops->reset)
ops->reset(qdisc);
if (ops->destroy)
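
After the rewrite, qdisc_restart() reports its outcome through a plain return value, 0 for an empty or requeued queue and the remaining queue length otherwise, instead of the old goto maze. The sketch below shows how a caller such as __qdisc_run() consumes that convention; qdisc_restart() itself is file-local, so a hypothetical my_restart() stands in for it.

#include <linux/netdevice.h>

/* Stand-in for the file-local qdisc_restart(). */
static int my_restart(struct net_device *dev)
{
	return 0;	/* 0: queue empty or packet requeued; >0: more work */
}

/* Sketch of the consuming loop (cf. __qdisc_run): keep transmitting while
 * the restart step reports work and the driver has not stopped the queue. */
static void my_qdisc_run(struct net_device *dev)
{
	do {
		if (!my_restart(dev))
			break;
	} while (!netif_queue_stopped(dev));
}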
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index fa1b4fe7a5fd..3cc6dda02e2e 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -21,7 +21,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/red.h>
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 9d124c4ee3a7..55e7e4530f43 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -53,7 +53,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
-#include <linux/jiffies.h>
#include <linux/compiler.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
@@ -62,13 +61,11 @@
#include <linux/list.h>
#include <linux/rbtree.h>
#include <linux/init.h>
-#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
-#include <asm/system.h>
#include <asm/div64.h>
/*
@@ -122,7 +119,6 @@ struct hfsc_class
struct gnet_stats_basic bstats;
struct gnet_stats_queue qstats;
struct gnet_stats_rate_est rate_est;
- spinlock_t *stats_lock;
unsigned int level; /* class level in hierarchy */
struct tcf_proto *filter_list; /* filter list */
unsigned int filter_cnt; /* filter count */
@@ -1054,11 +1050,10 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
}
sch_tree_unlock(sch);
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
gen_replace_estimator(&cl->bstats, &cl->rate_est,
- cl->stats_lock, tca[TCA_RATE-1]);
-#endif
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1]);
return 0;
}
@@ -1098,7 +1093,6 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->qdisc = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops, classid);
if (cl->qdisc == NULL)
cl->qdisc = &noop_qdisc;
- cl->stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&cl->children);
cl->vt_tree = RB_ROOT;
cl->cf_tree = RB_ROOT;
@@ -1112,11 +1106,9 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
cl->cl_pcvtoff = parent->cl_cvtoff;
sch_tree_unlock(sch);
-#ifdef CONFIG_NET_ESTIMATOR
if (tca[TCA_RATE-1])
gen_new_estimator(&cl->bstats, &cl->rate_est,
- cl->stats_lock, tca[TCA_RATE-1]);
-#endif
+ &sch->dev->queue_lock, tca[TCA_RATE-1]);
*arg = (unsigned long)cl;
return 0;
}
@@ -1128,9 +1120,7 @@ hfsc_destroy_class(struct Qdisc *sch, struct hfsc_class *cl)
tcf_destroy_chain(cl->filter_list);
qdisc_destroy(cl->qdisc);
-#ifdef CONFIG_NET_ESTIMATOR
gen_kill_estimator(&cl->bstats, &cl->rate_est);
-#endif
if (cl != &q->root)
kfree(cl);
}
@@ -1184,9 +1174,6 @@ hfsc_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
case TC_ACT_SHOT:
return NULL;
}
-#elif defined(CONFIG_NET_CLS_POLICE)
- if (result == TC_POLICE_SHOT)
- return NULL;
#endif
if ((cl = (struct hfsc_class *)res.class) == NULL) {
if ((cl = hfsc_find_class(res.classid, sch)) == NULL)
@@ -1384,9 +1371,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
xstats.rtwork = cl->cl_cumul;
if (gnet_stats_copy_basic(d, &cl->bstats) < 0 ||
-#ifdef CONFIG_NET_ESTIMATOR
gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-#endif
gnet_stats_copy_queue(d, &cl->qstats) < 0)
return -1;
@@ -1448,8 +1433,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
return -EINVAL;
qopt = RTA_DATA(opt);
- sch->stats_lock = &sch->dev->queue_lock;
-
q->defcls = qopt->defcls;
for (i = 0; i < HFSC_HSIZE; i++)
INIT_LIST_HEAD(&q->clhash[i]);
@@ -1464,7 +1447,6 @@ hfsc_init_qdisc(struct Qdisc *sch, struct rtattr *opt)
sch->handle);
if (q->root.qdisc == NULL)
q->root.qdisc = &noop_qdisc;
- q->root.stats_lock = &sch->dev->queue_lock;
INIT_LIST_HEAD(&q->root.children);
q->root.vt_tree = RB_ROOT;
q->root.cf_tree = RB_ROOT;
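
Since the rate estimator is now always built in, the per-class stats_lock field is gone and every call site passes &sch->dev->queue_lock directly. The lifecycle is the same in cbq, hfsc and htb: gen_new_estimator() when a class is created, gen_replace_estimator() when it is changed, gen_kill_estimator() when it is destroyed. A condensed sketch of that lifecycle, with error handling and the surrounding class setup omitted and the my_ names hypothetical:

#include <linux/rtnetlink.h>
#include <net/gen_stats.h>
#include <net/pkt_sched.h>

struct my_class {
	struct gnet_stats_basic    bstats;
	struct gnet_stats_rate_est rate_est;
};

/* New class: attach a rate estimator if a TCA_RATE attribute was supplied. */
static void my_class_new(struct Qdisc *sch, struct my_class *cl,
			 struct rtattr *rate_attr)
{
	if (rate_attr)
		gen_new_estimator(&cl->bstats, &cl->rate_est,
				  &sch->dev->queue_lock, rate_attr);
}

/* Changed class: swap the estimator parameters in place. */
static void my_class_change(struct Qdisc *sch, struct my_class *cl,
			    struct rtattr *rate_attr)
{
	if (rate_attr)
		gen_replace_estimator(&cl->bstats, &cl->rate_est,
				      &sch->dev->queue_lock, rate_attr);
}

/* Destroyed class: always drop the estimator. */
static void my_class_destroy(struct my_class *cl)
{
	gen_kill_estimator(&cl->bstats, &cl->rate_est);
}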
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 99bcec8dd04c..246a2f9765f1 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -28,32 +28,16 @@
* $Id: sch_htb.c,v 1.25 2003/12/07 11:08:25 devik Exp devik $
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
+#include <linux/rbtree.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
-#include <linux/rbtree.h>
/* HTB algorithm.
Author: devik@cdi.cz
@@ -69,8 +53,6 @@
*/
#define HTB_HSIZE 16 /* classid hash size */
-#define HTB_EWMAC 2 /* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#define HTB_RATECM 1 /* whether to use rate computer */
#define HTB_HYSTERESIS 1 /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011 /* major must be matched with number suplied by TC as version */
@@ -95,12 +77,6 @@ struct htb_class {
struct tc_htb_xstats xstats; /* our special stats */
int refcnt; /* usage count of this class */
-#ifdef HTB_RATECM
- /* rate measurement counters */
- unsigned long rate_bytes, sum_bytes;
- unsigned long rate_packets, sum_packets;
-#endif
-
/* topology */
int level; /* our level (see above) */
struct htb_class *parent; /* parent class */
@@ -153,15 +129,12 @@ struct htb_class {
/* of un.leaf originals should be done. */
};
-/* TODO: maybe compute rate when size is too large .. or drop ? */
static inline long L2T(struct htb_class *cl, struct qdisc_rate_table *rate,
int size)
{
int slot = size >> rate->rate.cell_log;
- if (slot > 255) {
- cl->xstats.giants++;
- slot = 255;
- }
+ if (slot > 255)
+ return (rate->data[255]*(slot >> 8) + rate->data[slot & 0xFF]);
return rate->data[slot];
}
@@ -194,10 +167,6 @@ struct htb_sched {
int rate2quantum; /* quant = rate / rate2quantum */
psched_time_t now; /* cached dequeue time */
struct qdisc_watchdog watchdog;
-#ifdef HTB_RATECM
- struct timer_list rttim; /* rate computer timer */
- int recmp_bucket; /* which hash bucket to recompute next */
-#endif
/* non shaped skbs; let them go directly thru */
struct sk_buff_head direct_queue;
@@ -280,9 +249,6 @@ static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
case TC_ACT_SHOT:
return NULL;
}
-#elif defined(CONFIG_NET_CLS_POLICE)
- if (result == TC_POLICE_SHOT)
- return HTB_DIRECT;
#endif
if ((cl = (void *)res.class) == NULL) {
if (res.classid == sch->handle)
@@ -634,13 +600,14 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl->qstats.drops++;
return NET_XMIT_DROP;
} else {
- cl->bstats.packets++;
+ cl->bstats.packets +=
+ skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
cl->bstats.bytes += skb->len;
htb_activate(q, cl);
}
sch->q.qlen++;
- sch->bstats.packets++;
+ sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
sch->bstats.bytes += skb->len;
return NET_XMIT_SUCCESS;
}
@@ -677,34 +644,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS;
}
-#ifdef HTB_RATECM
-#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
-static void htb_rate_timer(unsigned long arg)
-{
- struct Qdisc *sch = (struct Qdisc *)arg;
- struct htb_sched *q = qdisc_priv(sch);
- struct hlist_node *p;
- struct htb_class *cl;
-
-
- /* lock queue so that we can muck with it */
- spin_lock_bh(&sch->dev->queue_lock);
-
- q->rttim.expires = jiffies + HZ;
- add_timer(&q->rttim);
-
- /* scan and recompute one bucket at time */
- if (++q->recmp_bucket >= HTB_HSIZE)
- q->recmp_bucket = 0;
-
- hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
- RT_GEN(cl->sum_bytes, cl->rate_bytes);
- RT_GEN(cl->sum_packets, cl->rate_packets);
- }
- spin_unlock_bh(&sch->dev->queue_lock);
-}
-#endif
-
/**
* htb_charge_class - charges amount "bytes" to leaf and ancestors
*
@@ -717,8 +656,9 @@ static void htb_rate_timer(unsigned long arg)
* In such case we remove class from event queue first.
*/
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
- int level, int bytes)
+ int level, struct sk_buff *skb)
{
+ int bytes = skb->len;
long toks, diff;
enum htb_cmode old_mode;
@@ -750,16 +690,12 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
if (cl->cmode != HTB_CAN_SEND)
htb_add_to_wait_tree(q, cl, diff);
}
-#ifdef HTB_RATECM
- /* update rate counters */
- cl->sum_bytes += bytes;
- cl->sum_packets++;
-#endif
/* update byte stats except for leaves which are already updated */
if (cl->level) {
cl->bstats.bytes += bytes;
- cl->bstats.packets++;
+ cl->bstats.packets += skb_is_gso(skb)?
+ skb_shinfo(skb)->gso_segs:1;
}
cl = cl->parent;
}
@@ -943,7 +879,7 @@ next:
gives us slightly better performance */
if (!cl->un.leaf.q->q.qlen)
htb_deactivate(q, cl);
- htb_charge_class(q, cl, level, skb->len);
+ htb_charge_class(q, cl, level, skb);
}
return skb;
}
@@ -976,8 +912,9 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
if (q->now >= q->near_ev_cache[level]) {
event = htb_do_events(q, level);
- q->near_ev_cache[level] = event ? event :
- PSCHED_TICKS_PER_SEC;
+ if (!event)
+ event = q->now + PSCHED_TICKS_PER_SEC;
+ q->near_ev_cache[level] = event;
} else
event = q->near_ev_cache[level];
@@ -1094,13 +1031,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
if (q->direct_qlen < 2) /* some devices have zero tx_queue_len */
q->direct_qlen = 2;
-#ifdef HTB_RATECM
- init_timer(&q->rttim);
- q->rttim.function = htb_rate_timer;
- q->rttim.data = (unsigned long)sch;
- q->rttim.expires = jiffies + HZ;
- add_timer(&q->rttim);
-#endif
if ((q->rate2quantum = gopt->rate2quantum) < 1)
q->rate2quantum = 1;
q->defcls = gopt->defcls;
@@ -1174,11 +1104,6 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
struct htb_class *cl = (struct htb_class *)arg;
-#ifdef HTB_RATECM
- cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
- cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
-#endif
-
if (!cl->level && cl->un.leaf.q)
cl->qstats.qlen = cl->un.leaf.q->q.qlen;
cl->xstats.tokens = cl->tokens;
@@ -1276,6 +1201,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
BUG_TRAP(cl->un.leaf.q);
qdisc_destroy(cl->un.leaf.q);
}
+ gen_kill_estimator(&cl->bstats, &cl->rate_est);
qdisc_put_rtab(cl->rate);
qdisc_put_rtab(cl->ceil);
@@ -1304,9 +1230,6 @@ static void htb_destroy(struct Qdisc *sch)
struct htb_sched *q = qdisc_priv(sch);
qdisc_watchdog_cancel(&q->watchdog);
-#ifdef HTB_RATECM
- del_timer_sync(&q->rttim);
-#endif
/* This line used to be after htb_destroy_class call below
and surprisingly it worked in 2.4. But it must precede it
because filter need its target class alive to be able to call
@@ -1402,6 +1325,20 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if (!cl) { /* new class */
struct Qdisc *new_q;
int prio;
+ struct {
+ struct rtattr rta;
+ struct gnet_estimator opt;
+ } est = {
+ .rta = {
+ .rta_len = RTA_LENGTH(sizeof(est.opt)),
+ .rta_type = TCA_RATE,
+ },
+ .opt = {
+ /* 4s interval, 16s averaging constant */
+ .interval = 2,
+ .ewma_log = 2,
+ },
+ };
/* check for valid classid */
if (!classid || TC_H_MAJ(classid ^ sch->handle)
@@ -1417,6 +1354,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
goto failure;
+ gen_new_estimator(&cl->bstats, &cl->rate_est,
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1] ? : &est.rta);
cl->refcnt = 1;
INIT_LIST_HEAD(&cl->sibling);
INIT_HLIST_NODE(&cl->hlist);
@@ -1468,8 +1408,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
list_add_tail(&cl->sibling,
parent ? &parent->children : &q->root);
- } else
+ } else {
+ if (tca[TCA_RATE-1])
+ gen_replace_estimator(&cl->bstats, &cl->rate_est,
+ &sch->dev->queue_lock,
+ tca[TCA_RATE-1]);
sch_tree_lock(sch);
+ }
/* it used to be a nasty bug here, we have to check that node
is really leaf before changing cl->un.leaf ! */
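
Dropping the homegrown HTB_RATECM rate computer in favour of gen_new_estimator() also means the counters feeding the estimator must stay accurate for GSO skbs, which is why the enqueue and charge paths above count skb_shinfo(skb)->gso_segs packets per super-packet. The helper below is only an illustration of that accounting factored into one place; the name is hypothetical.

#include <linux/skbuff.h>
#include <net/gen_stats.h>

/* Count a (possibly GSO) skb into basic stats: bytes once, one packet per
 * segment, so the rate estimator sees wire-level packet counts. */
static inline void my_bstats_update(struct gnet_stats_basic *bstats,
				    struct sk_buff *skb)
{
	bstats->bytes   += skb->len;
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}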
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f8b9f1cdf738..51f16b0af198 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -9,21 +9,14 @@
#include <linux/module.h>
#include <linux/types.h>
+#include <linux/list.h>
#include <linux/skbuff.h>
-#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter.h>
-#include <linux/smp.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
-#include <asm/byteorder.h>
-#include <asm/uaccess.h>
-#include <linux/kmod.h>
-#include <linux/stat.h>
-#include <linux/interrupt.h>
-#include <linux/list.h>
#undef DEBUG_INGRESS
@@ -171,31 +164,12 @@ static int ingress_enqueue(struct sk_buff *skb,struct Qdisc *sch)
result = TC_ACT_OK;
break;
}
-/* backward compat */
-#else
-#ifdef CONFIG_NET_CLS_POLICE
- switch (result) {
- case TC_POLICE_SHOT:
- result = NF_DROP;
- sch->qstats.drops++;
- break;
- case TC_POLICE_RECLASSIFY: /* DSCP remarking here ? */
- case TC_POLICE_OK:
- case TC_POLICE_UNSPEC:
- default:
- sch->bstats.packets++;
- sch->bstats.bytes += skb->len;
- result = NF_ACCEPT;
- break;
- }
-
#else
D2PRINTK("Overriding result to ACCEPT\n");
result = NF_ACCEPT;
sch->bstats.packets++;
sch->bstats.bytes += skb->len;
#endif
-#endif
return result;
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 5d9d8bc9cc3a..9e5e87e81f00 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -14,11 +14,9 @@
*/
#include <linux/module.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index 6d7542c26e47..2d8c08493d6e 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -12,37 +12,23 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
-#include <net/sock.h>
#include <net/pkt_sched.h>
struct prio_sched_data
{
int bands;
+ int curband; /* for round-robin */
struct tcf_proto *filter_list;
u8 prio2band[TC_PRIO_MAX+1];
struct Qdisc *queues[TCQ_PRIO_BANDS];
+ int mq;
};
@@ -70,14 +56,17 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
#endif
if (TC_H_MAJ(band))
band = 0;
- return q->queues[q->prio2band[band&TC_PRIO_MAX]];
+ band = q->prio2band[band&TC_PRIO_MAX];
+ goto out;
}
band = res.classid;
}
band = TC_H_MIN(band) - 1;
if (band >= q->bands)
- return q->queues[q->prio2band[0]];
-
+ band = q->prio2band[0];
+out:
+ if (q->mq)
+ skb_set_queue_mapping(skb, band);
return q->queues[band];
}
@@ -144,17 +133,58 @@ prio_dequeue(struct Qdisc* sch)
struct Qdisc *qdisc;
for (prio = 0; prio < q->bands; prio++) {
- qdisc = q->queues[prio];
- skb = qdisc->dequeue(qdisc);
- if (skb) {
- sch->q.qlen--;
- return skb;
+ /* Check if the target subqueue is available before
+ * pulling an skb. This way we avoid excessive requeues
+ * for slower queues.
+ */
+ if (!netif_subqueue_stopped(sch->dev, (q->mq ? prio : 0))) {
+ qdisc = q->queues[prio];
+ skb = qdisc->dequeue(qdisc);
+ if (skb) {
+ sch->q.qlen--;
+ return skb;
+ }
}
}
return NULL;
}
+static struct sk_buff *rr_dequeue(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+ struct prio_sched_data *q = qdisc_priv(sch);
+ struct Qdisc *qdisc;
+ int bandcount;
+
+ /* Only take one pass through the queues. If nothing is available,
+ * return nothing.
+ */
+ for (bandcount = 0; bandcount < q->bands; bandcount++) {
+ /* Check if the target subqueue is available before
+ * pulling an skb. This way we avoid excessive requeues
+ * for slower queues. If the queue is stopped, try the
+ * next queue.
+ */
+ if (!netif_subqueue_stopped(sch->dev,
+ (q->mq ? q->curband : 0))) {
+ qdisc = q->queues[q->curband];
+ skb = qdisc->dequeue(qdisc);
+ if (skb) {
+ sch->q.qlen--;
+ q->curband++;
+ if (q->curband >= q->bands)
+ q->curband = 0;
+ return skb;
+ }
+ }
+ q->curband++;
+ if (q->curband >= q->bands)
+ q->curband = 0;
+ }
+ return NULL;
+}
+
static unsigned int prio_drop(struct Qdisc* sch)
{
struct prio_sched_data *q = qdisc_priv(sch);
@@ -198,21 +228,41 @@ prio_destroy(struct Qdisc* sch)
static int prio_tune(struct Qdisc *sch, struct rtattr *opt)
{
struct prio_sched_data *q = qdisc_priv(sch);
- struct tc_prio_qopt *qopt = RTA_DATA(opt);
+ struct tc_prio_qopt *qopt;
+ struct rtattr *tb[TCA_PRIO_MAX];
int i;
- if (opt->rta_len < RTA_LENGTH(sizeof(*qopt)))
+ if (rtattr_parse_nested_compat(tb, TCA_PRIO_MAX, opt, qopt,
+ sizeof(*qopt)))
return -EINVAL;
- if (qopt->bands > TCQ_PRIO_BANDS || qopt->bands < 2)
+ q->bands = qopt->bands;
+ /* If we're multiqueue, make sure the number of incoming bands
+ * matches the number of queues on the device we're associating with.
+ * If the number of bands requested is zero, then set q->bands to
+ * dev->egress_subqueue_count.
+ */
+ q->mq = RTA_GET_FLAG(tb[TCA_PRIO_MQ - 1]);
+ if (q->mq) {
+ if (sch->handle != TC_H_ROOT)
+ return -EINVAL;
+ if (netif_is_multiqueue(sch->dev)) {
+ if (q->bands == 0)
+ q->bands = sch->dev->egress_subqueue_count;
+ else if (q->bands != sch->dev->egress_subqueue_count)
+ return -EINVAL;
+ } else
+ return -EOPNOTSUPP;
+ }
+
+ if (q->bands > TCQ_PRIO_BANDS || q->bands < 2)
return -EINVAL;
for (i=0; i<=TC_PRIO_MAX; i++) {
- if (qopt->priomap[i] >= qopt->bands)
+ if (qopt->priomap[i] >= q->bands)
return -EINVAL;
}
sch_tree_lock(sch);
- q->bands = qopt->bands;
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
for (i=q->bands; i<TCQ_PRIO_BANDS; i++) {
@@ -268,11 +318,17 @@ static int prio_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct prio_sched_data *q = qdisc_priv(sch);
unsigned char *b = skb_tail_pointer(skb);
+ struct rtattr *nest;
struct tc_prio_qopt opt;
opt.bands = q->bands;
memcpy(&opt.priomap, q->prio2band, TC_PRIO_MAX+1);
- RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ nest = RTA_NEST_COMPAT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+ if (q->mq)
+ RTA_PUT_FLAG(skb, TCA_PRIO_MQ);
+ RTA_NEST_COMPAT_END(skb, nest);
+
return skb->len;
rtattr_failure:
@@ -443,17 +499,44 @@ static struct Qdisc_ops prio_qdisc_ops = {
.owner = THIS_MODULE,
};
+static struct Qdisc_ops rr_qdisc_ops = {
+ .next = NULL,
+ .cl_ops = &prio_class_ops,
+ .id = "rr",
+ .priv_size = sizeof(struct prio_sched_data),
+ .enqueue = prio_enqueue,
+ .dequeue = rr_dequeue,
+ .requeue = prio_requeue,
+ .drop = prio_drop,
+ .init = prio_init,
+ .reset = prio_reset,
+ .destroy = prio_destroy,
+ .change = prio_tune,
+ .dump = prio_dump,
+ .owner = THIS_MODULE,
+};
+
static int __init prio_module_init(void)
{
- return register_qdisc(&prio_qdisc_ops);
+ int err;
+
+ err = register_qdisc(&prio_qdisc_ops);
+ if (err < 0)
+ return err;
+ err = register_qdisc(&rr_qdisc_ops);
+ if (err < 0)
+ unregister_qdisc(&prio_qdisc_ops);
+ return err;
}
static void __exit prio_module_exit(void)
{
unregister_qdisc(&prio_qdisc_ops);
+ unregister_qdisc(&rr_qdisc_ops);
}
module_init(prio_module_init)
module_exit(prio_module_exit)
MODULE_LICENSE("GPL");
+MODULE_ALIAS("sch_rr");
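
The new multiqueue mode pairs each PRIO/RR band with a hardware transmit subqueue: the chosen band is recorded on the skb with skb_set_queue_mapping(), and the dequeue paths here (and the teql transmit path further down) skip any band whose subqueue the driver has stopped. The sketch below shows only that pairing, assuming the two-argument netif_subqueue_stopped() of this kernel generation; the my_ helpers are hypothetical.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/* Enqueue side: remember which band/subqueue this skb belongs to. */
static void my_mark_band(struct sk_buff *skb, int band, int mq)
{
	if (mq)
		skb_set_queue_mapping(skb, band);
}

/* Dequeue side: only pull from a band whose subqueue is still open. */
static struct sk_buff *my_try_band(struct Qdisc *sch, struct Qdisc **queues,
				   int band, int mq)
{
	if (netif_subqueue_stopped(sch->dev, mq ? band : 0))
		return NULL;		/* driver stopped this ring; try another band */
	return queues[band]->dequeue(queues[band]);
}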
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index 00db53eb8159..9b95fefb70f4 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -17,7 +17,6 @@
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 96dfdf78d32c..957957309859 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -10,31 +10,17 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
#include <linux/init.h>
-#include <net/ip.h>
-#include <net/netlink.h>
#include <linux/ipv6.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/ip.h>
+#include <net/netlink.h>
#include <net/pkt_sched.h>
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 53862953baaf..8c2639af4c6a 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -13,29 +13,12 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/jiffies.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
-#include <net/ip.h>
-#include <net/netlink.h>
-#include <net/route.h>
#include <linux/skbuff.h>
-#include <net/sock.h>
+#include <net/netlink.h>
#include <net/pkt_sched.h>
@@ -142,7 +125,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
if (skb->len > q->max_size) {
sch->qstats.drops++;
-#ifdef CONFIG_NET_CLS_POLICE
+#ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
#endif
kfree_skb(skb);
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index f05ad9a30b4c..0968184ea6be 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -9,30 +9,17 @@
*/
#include <linux/module.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/socket.h>
-#include <linux/sockios.h>
-#include <linux/in.h>
#include <linux/errno.h>
-#include <linux/interrupt.h>
#include <linux/if_arp.h>
-#include <linux/if_ether.h>
-#include <linux/inet.h>
#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/notifier.h>
#include <linux/init.h>
-#include <net/ip.h>
-#include <net/route.h>
#include <linux/skbuff.h>
#include <linux/moduleparam.h>
-#include <net/sock.h>
+#include <net/dst.h>
+#include <net/neighbour.h>
#include <net/pkt_sched.h>
/*
@@ -225,7 +212,6 @@ static int teql_qdisc_init(struct Qdisc *sch, struct rtattr *opt)
return 0;
}
-/* "teql*" netdevice routines */
static int
__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res, struct net_device *dev)
@@ -277,6 +263,7 @@ static int teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
int busy;
int nores;
int len = skb->len;
+ int subq = skb->queue_mapping;
struct sk_buff *skb_res = NULL;
start = master->slaves;
@@ -293,7 +280,9 @@ restart:
if (slave->qdisc_sleeping != q)
continue;
- if (netif_queue_stopped(slave) || ! netif_running(slave)) {
+ if (netif_queue_stopped(slave) ||
+ netif_subqueue_stopped(slave, subq) ||
+ !netif_running(slave)) {
busy = 1;
continue;
}
@@ -302,6 +291,7 @@ restart:
case 0:
if (netif_tx_trylock(slave)) {
if (!netif_queue_stopped(slave) &&
+ !netif_subqueue_stopped(slave, subq) &&
slave->hard_start_xmit(skb, slave) == 0) {
netif_tx_unlock(slave);
master->slaves = NEXT_SLAVE(q);