Diffstat (limited to 'net/sched')
-rw-r--r--  net/sched/Kconfig          |  14
-rw-r--r--  net/sched/Makefile         |   1
-rw-r--r--  net/sched/act_api.c        | 118
-rw-r--r--  net/sched/act_ct.c         |  10
-rw-r--r--  net/sched/act_ctinfo.c     |   8
-rw-r--r--  net/sched/act_gate.c       |  14
-rw-r--r--  net/sched/act_mirred.c     |  28
-rw-r--r--  net/sched/act_mpls.c       |  18
-rw-r--r--  net/sched/act_police.c     |   6
-rw-r--r--  net/sched/act_tunnel_key.c |  10
-rw-r--r--  net/sched/bpf_qdisc.c      | 475
-rw-r--r--  net/sched/cls_api.c        | 198
-rw-r--r--  net/sched/cls_bpf.c        |   2
-rw-r--r--  net/sched/cls_flow.c       |   5
-rw-r--r--  net/sched/cls_flower.c     |   9
-rw-r--r--  net/sched/cls_matchall.c   |   2
-rw-r--r--  net/sched/cls_u32.c        |  22
-rw-r--r--  net/sched/em_meta.c        |   2
-rw-r--r--  net/sched/sch_api.c        | 309
-rw-r--r--  net/sched/sch_cake.c       | 185
-rw-r--r--  net/sched/sch_cbs.c        |   2
-rw-r--r--  net/sched/sch_choke.c      |   4
-rw-r--r--  net/sched/sch_codel.c      |  12
-rw-r--r--  net/sched/sch_drr.c        |  16
-rw-r--r--  net/sched/sch_ets.c        |  21
-rw-r--r--  net/sched/sch_fifo.c       |   3
-rw-r--r--  net/sched/sch_fq.c         |  58
-rw-r--r--  net/sched/sch_fq_codel.c   |  11
-rw-r--r--  net/sched/sch_fq_pie.c     |  12
-rw-r--r--  net/sched/sch_frag.c       |  10
-rw-r--r--  net/sched/sch_generic.c    |  70
-rw-r--r--  net/sched/sch_gred.c       |   9
-rw-r--r--  net/sched/sch_hfsc.c       |  46
-rw-r--r--  net/sched/sch_hhf.c        |   2
-rw-r--r--  net/sched/sch_htb.c        |  17
-rw-r--r--  net/sched/sch_netem.c      |  23
-rw-r--r--  net/sched/sch_pie.c        |  11
-rw-r--r--  net/sched/sch_prio.c       |   2
-rw-r--r--  net/sched/sch_qfq.c        |  25
-rw-r--r--  net/sched/sch_red.c        |  14
-rw-r--r--  net/sched/sch_sfb.c        |   4
-rw-r--r--  net/sched/sch_sfq.c        | 114
-rw-r--r--  net/sched/sch_skbprio.c    |   3
-rw-r--r--  net/sched/sch_taprio.c     |  14
-rw-r--r--  net/sched/sch_tbf.c        |  20
45 files changed, 1368 insertions, 591 deletions
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 8180d0c12fce..ad914d2b2e22 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -403,6 +403,18 @@ config NET_SCH_ETS
If unsure, say N.
+config NET_SCH_BPF
+ bool "BPF-based Qdisc"
+ depends on BPF_SYSCALL && BPF_JIT && DEBUG_INFO_BTF
+ help
+	  This option allows BPF-based queueing disciplines. With BPF struct_ops,
+	  users can implement supported operators in Qdisc_ops using BPF programs.
+	  The queue holding skbs can be built with BPF maps or graphs.
+
+ Say Y here if you want to use BPF-based Qdisc.
+
+ If unsure, say N.
+
menuconfig NET_SCH_DEFAULT
bool "Allow override default queue discipline"
help
@@ -784,7 +796,7 @@ config NET_ACT_SKBEDIT
config NET_ACT_CSUM
tristate "Checksum Updating"
depends on NET_CLS_ACT && INET
- select LIBCRC32C
+ select NET_CRC32C
help
Say Y here to update some common checksum after some direct
packet alterations.
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 82c3f78ca486..904d784902d1 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -62,6 +62,7 @@ obj-$(CONFIG_NET_SCH_FQ_PIE) += sch_fq_pie.o
obj-$(CONFIG_NET_SCH_CBS) += sch_cbs.o
obj-$(CONFIG_NET_SCH_ETF) += sch_etf.o
obj-$(CONFIG_NET_SCH_TAPRIO) += sch_taprio.o
+obj-$(CONFIG_NET_SCH_BPF) += bpf_qdisc.o
obj-$(CONFIG_NET_CLS_U32) += cls_u32.o
obj-$(CONFIG_NET_CLS_ROUTE4) += cls_route.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index eecad65fec92..057e20cef375 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -504,6 +504,50 @@ nla_put_failure:
return -1;
}
+static int
+tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct nlattr *nest;
+ int err = -EINVAL;
+ u32 flags;
+
+ if (tcf_action_dump_terse(skb, a, false))
+ goto nla_put_failure;
+
+ if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
+ nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
+ a->hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ if (a->used_hw_stats_valid &&
+ nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
+ a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
+ goto nla_put_failure;
+
+ flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
+ if (flags &&
+ nla_put_bitfield32(skb, TCA_ACT_FLAGS,
+ flags, flags))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
+ goto nla_put_failure;
+
+ nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
+ if (nest == NULL)
+ goto nla_put_failure;
+ err = tcf_action_dump_old(skb, a, bind, ref);
+ if (err > 0) {
+ nla_nest_end(skb, nest);
+ return err;
+ }
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
static int tcf_dump_walker(struct tcf_idrinfo *idrinfo, struct sk_buff *skb,
struct netlink_callback *cb)
{
@@ -1190,51 +1234,6 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
return a->ops->dump(skb, a, bind, ref);
}
-int
-tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
-{
- int err = -EINVAL;
- unsigned char *b = skb_tail_pointer(skb);
- struct nlattr *nest;
- u32 flags;
-
- if (tcf_action_dump_terse(skb, a, false))
- goto nla_put_failure;
-
- if (a->hw_stats != TCA_ACT_HW_STATS_ANY &&
- nla_put_bitfield32(skb, TCA_ACT_HW_STATS,
- a->hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- if (a->used_hw_stats_valid &&
- nla_put_bitfield32(skb, TCA_ACT_USED_HW_STATS,
- a->used_hw_stats, TCA_ACT_HW_STATS_ANY))
- goto nla_put_failure;
-
- flags = a->tcfa_flags & TCA_ACT_FLAGS_USER_MASK;
- if (flags &&
- nla_put_bitfield32(skb, TCA_ACT_FLAGS,
- flags, flags))
- goto nla_put_failure;
-
- if (nla_put_u32(skb, TCA_ACT_IN_HW_COUNT, a->in_hw_count))
- goto nla_put_failure;
-
- nest = nla_nest_start_noflag(skb, TCA_ACT_OPTIONS);
- if (nest == NULL)
- goto nla_put_failure;
- err = tcf_action_dump_old(skb, a, bind, ref);
- if (err > 0) {
- nla_nest_end(skb, nest);
- return err;
- }
-
-nla_put_failure:
- nlmsg_trim(skb, b);
- return -1;
-}
-EXPORT_SYMBOL(tcf_action_dump_1);
-
int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[],
int bind, int ref, bool terse)
{
@@ -1462,17 +1461,29 @@ int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct netlink_ext_ack *extack)
{
struct tc_action_ops *ops[TCA_ACT_MAX_PRIO] = {};
- struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
+ struct nlattr *tb[TCA_ACT_MAX_PRIO + 2];
struct tc_action *act;
size_t sz = 0;
int err;
int i;
- err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO, nla, NULL,
+ err = nla_parse_nested_deprecated(tb, TCA_ACT_MAX_PRIO + 1, nla, NULL,
extack);
if (err < 0)
return err;
+ /* The nested attributes are parsed as types, but they are really an
+ * array of actions. So we parse one more than we can handle, and return
+ * an error if the last one is set (as that indicates that the request
+ * contained more than the maximum number of actions).
+ */
+ if (tb[TCA_ACT_MAX_PRIO + 1]) {
+ NL_SET_ERR_MSG_FMT(extack,
+ "Only %d actions supported per filter",
+ TCA_ACT_MAX_PRIO);
+ return -EINVAL;
+ }
+
for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
struct tc_action_ops *a_o;
@@ -2264,13 +2275,16 @@ out_module_put:
return skb->len;
}
+static const struct rtnl_msg_handler tc_action_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_DELACTION, .doit = tc_ctl_action},
+ {.msgtype = RTM_GETACTION, .doit = tc_ctl_action,
+ .dumpit = tc_dump_action},
+};
+
static int __init tc_action_init(void)
{
- rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
- 0);
-
+ rtnl_register_many(tc_action_rtnl_msg_handlers);
return 0;
}
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index 2197eb625658..c02f39efc6ef 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -1183,9 +1183,8 @@ static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
range->min_addr.ip =
nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);
- range->max_addr.ip = max_attr ?
- nla_get_in_addr(max_attr) :
- range->min_addr.ip;
+ range->max_addr.ip =
+ nla_get_in_addr_default(max_attr, range->min_addr.ip);
} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];
@@ -1314,8 +1313,9 @@ static int tcf_ct_fill_params(struct net *net,
err = -EINVAL;
goto err;
}
- family = tb[TCA_CT_HELPER_FAMILY] ? nla_get_u8(tb[TCA_CT_HELPER_FAMILY]) : AF_INET;
- proto = tb[TCA_CT_HELPER_PROTO] ? nla_get_u8(tb[TCA_CT_HELPER_PROTO]) : IPPROTO_TCP;
+ family = nla_get_u8_default(tb[TCA_CT_HELPER_FAMILY], AF_INET);
+ proto = nla_get_u8_default(tb[TCA_CT_HELPER_PROTO],
+ IPPROTO_TCP);
err = nf_ct_add_helper(tmpl, name, family, proto,
p->ct_action & TCA_CT_ACT_NAT, &p->helper);
if (err) {
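
The act_ct.c conversions above (and the act_ctinfo.c, act_gate.c, act_mpls.c and act_police.c hunks below) replace open-coded `attr ? nla_get_*(attr) : default` ternaries with the nla_get_*_default() accessors. A simplified sketch of the accessor semantics, modeled on include/net/netlink.h (illustrative, not the verbatim kernel definition):

	static inline u8 nla_get_u8_default(const struct nlattr *nla, u8 defvalue)
	{
		/* Return the attribute's value when present, else the default. */
		return nla ? nla_get_u8(nla) : defvalue;
	}

The u16/u32/u64/s32/be16/in_addr variants used in these hunks follow the same pattern, so each call site shrinks to a single expression.
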
diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c
index 5dd41a012110..5b1241ddc758 100644
--- a/net/sched/act_ctinfo.c
+++ b/net/sched/act_ctinfo.c
@@ -197,8 +197,9 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
"dscp mask must be 6 contiguous bits");
return -EINVAL;
}
- dscpstatemask = tb[TCA_CTINFO_PARMS_DSCP_STATEMASK] ?
- nla_get_u32(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK]) : 0;
+ dscpstatemask =
+ nla_get_u32_default(tb[TCA_CTINFO_PARMS_DSCP_STATEMASK],
+ 0);
/* mask & statemask must not overlap */
if (dscpmask & dscpstatemask) {
NL_SET_ERR_MSG_ATTR(extack,
@@ -243,8 +244,7 @@ static int tcf_ctinfo_init(struct net *net, struct nlattr *nla,
}
cp_new->net = net;
- cp_new->zone = tb[TCA_CTINFO_ZONE] ?
- nla_get_u16(tb[TCA_CTINFO_ZONE]) : 0;
+ cp_new->zone = nla_get_u16_default(tb[TCA_CTINFO_ZONE], 0);
if (dscpmask) {
cp_new->dscpmask = dscpmask;
cp_new->dscpmaskshift = dscpmaskshift;
diff --git a/net/sched/act_gate.c b/net/sched/act_gate.c
index 1dd74125398a..c1f75f272757 100644
--- a/net/sched/act_gate.c
+++ b/net/sched/act_gate.c
@@ -190,15 +190,10 @@ static int fill_gate_entry(struct nlattr **tb, struct tcfg_gate_entry *entry,
entry->interval = interval;
- if (tb[TCA_GATE_ENTRY_IPV])
- entry->ipv = nla_get_s32(tb[TCA_GATE_ENTRY_IPV]);
- else
- entry->ipv = -1;
+ entry->ipv = nla_get_s32_default(tb[TCA_GATE_ENTRY_IPV], -1);
- if (tb[TCA_GATE_ENTRY_MAX_OCTETS])
- entry->maxoctets = nla_get_s32(tb[TCA_GATE_ENTRY_MAX_OCTETS]);
- else
- entry->maxoctets = -1;
+ entry->maxoctets = nla_get_s32_default(tb[TCA_GATE_ENTRY_MAX_OCTETS],
+ -1);
return 0;
}
@@ -292,8 +287,7 @@ static void gate_setup_timer(struct tcf_gate *gact, u64 basetime,
gact->param.tcfg_basetime = basetime;
gact->param.tcfg_clockid = clockid;
gact->tk_offset = tko;
- hrtimer_init(&gact->hitimer, clockid, HRTIMER_MODE_ABS_SOFT);
- gact->hitimer.function = gate_timer_func;
+ hrtimer_setup(&gact->hitimer, gate_timer_func, clockid, HRTIMER_MODE_ABS_SOFT);
}
static int tcf_gate_init(struct net *net, struct nlattr *nla,
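
The gate_setup_timer() hunk above is part of a tree-wide migration from two-step hrtimer_init() plus manual callback assignment to hrtimer_setup(), which takes the callback up front so a timer can never be observed armed without a handler; qdisc_watchdog_init_clockid() in sch_api.c below is converted the same way. Roughly, the new helper behaves like this sketch (illustrative, not the kernel's exact implementation):

	static inline void hrtimer_setup(struct hrtimer *timer,
					 enum hrtimer_restart (*fn)(struct hrtimer *),
					 clockid_t clockid, enum hrtimer_mode mode)
	{
		/* Initialize the timer and install its callback in one step. */
		hrtimer_init(timer, clockid, mode);
		timer->function = fn;
	}
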
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 5b3814365924..5f01f567c934 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -30,7 +30,29 @@ static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);
#define MIRRED_NEST_LIMIT 4
-static DEFINE_PER_CPU(unsigned int, mirred_nest_level);
+
+#ifndef CONFIG_PREEMPT_RT
+static u8 tcf_mirred_nest_level_inc_return(void)
+{
+ return __this_cpu_inc_return(softnet_data.xmit.sched_mirred_nest);
+}
+
+static void tcf_mirred_nest_level_dec(void)
+{
+ __this_cpu_dec(softnet_data.xmit.sched_mirred_nest);
+}
+
+#else
+static u8 tcf_mirred_nest_level_inc_return(void)
+{
+ return current->net_xmit.sched_mirred_nest++;
+}
+
+static void tcf_mirred_nest_level_dec(void)
+{
+ current->net_xmit.sched_mirred_nest--;
+}
+#endif
static bool tcf_mirred_is_act_redirect(int action)
{
@@ -423,7 +445,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
int m_eaction;
u32 blockid;
- nest_level = __this_cpu_inc_return(mirred_nest_level);
+ nest_level = tcf_mirred_nest_level_inc_return();
if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
netdev_name(skb->dev));
@@ -454,7 +476,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
retval);
dec_nest_level:
- __this_cpu_dec(mirred_nest_level);
+ tcf_mirred_nest_level_dec();
return retval;
}
diff --git a/net/sched/act_mpls.c b/net/sched/act_mpls.c
index 44a37a71ae92..9f86f4e666d3 100644
--- a/net/sched/act_mpls.c
+++ b/net/sched/act_mpls.c
@@ -288,16 +288,14 @@ static int tcf_mpls_init(struct net *net, struct nlattr *nla,
}
p->tcfm_action = parm->m_action;
- p->tcfm_label = tb[TCA_MPLS_LABEL] ? nla_get_u32(tb[TCA_MPLS_LABEL]) :
- ACT_MPLS_LABEL_NOT_SET;
- p->tcfm_tc = tb[TCA_MPLS_TC] ? nla_get_u8(tb[TCA_MPLS_TC]) :
- ACT_MPLS_TC_NOT_SET;
- p->tcfm_ttl = tb[TCA_MPLS_TTL] ? nla_get_u8(tb[TCA_MPLS_TTL]) :
- mpls_ttl;
- p->tcfm_bos = tb[TCA_MPLS_BOS] ? nla_get_u8(tb[TCA_MPLS_BOS]) :
- ACT_MPLS_BOS_NOT_SET;
- p->tcfm_proto = tb[TCA_MPLS_PROTO] ? nla_get_be16(tb[TCA_MPLS_PROTO]) :
- htons(ETH_P_MPLS_UC);
+ p->tcfm_label = nla_get_u32_default(tb[TCA_MPLS_LABEL],
+ ACT_MPLS_LABEL_NOT_SET);
+ p->tcfm_tc = nla_get_u8_default(tb[TCA_MPLS_TC], ACT_MPLS_TC_NOT_SET);
+ p->tcfm_ttl = nla_get_u8_default(tb[TCA_MPLS_TTL], mpls_ttl);
+ p->tcfm_bos = nla_get_u8_default(tb[TCA_MPLS_BOS],
+ ACT_MPLS_BOS_NOT_SET);
+ p->tcfm_proto = nla_get_be16_default(tb[TCA_MPLS_PROTO],
+ htons(ETH_P_MPLS_UC));
spin_lock_bh(&m->tcf_lock);
goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 8555125ed34d..a214ed681142 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -167,8 +167,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (R_tab) {
new->rate_present = true;
- rate64 = tb[TCA_POLICE_RATE64] ?
- nla_get_u64(tb[TCA_POLICE_RATE64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_POLICE_RATE64], 0);
psched_ratecfg_precompute(&new->rate, &R_tab->rate, rate64);
qdisc_put_rtab(R_tab);
} else {
@@ -176,8 +175,7 @@ static int tcf_police_init(struct net *net, struct nlattr *nla,
}
if (P_tab) {
new->peak_present = true;
- prate64 = tb[TCA_POLICE_PEAKRATE64] ?
- nla_get_u64(tb[TCA_POLICE_PEAKRATE64]) : 0;
+ prate64 = nla_get_u64_default(tb[TCA_POLICE_PEAKRATE64], 0);
psched_ratecfg_precompute(&new->peak, &P_tab->rate, prate64);
qdisc_put_rtab(P_tab);
} else {
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index af7c99845948..2cef4b08befb 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -68,7 +68,7 @@ geneve_opt_policy[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX + 1] = {
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
[TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
- .len = 128 },
+ .len = 127 },
};
static const struct nla_policy
@@ -571,8 +571,8 @@ static void tunnel_key_release(struct tc_action *a)
static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
+ const u8 *src = ip_tunnel_info_opts(info);
int len = info->options_len;
- u8 *src = (u8 *)(info + 1);
struct nlattr *start;
start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_GENEVE);
@@ -580,7 +580,7 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
return -EMSGSIZE;
while (len > 0) {
- struct geneve_opt *opt = (struct geneve_opt *)src;
+ const struct geneve_opt *opt = (const struct geneve_opt *)src;
if (nla_put_be16(skb, TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS,
opt->opt_class) ||
@@ -603,7 +603,7 @@ static int tunnel_key_geneve_opts_dump(struct sk_buff *skb,
static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
- struct vxlan_metadata *md = (struct vxlan_metadata *)(info + 1);
+ const struct vxlan_metadata *md = ip_tunnel_info_opts(info);
struct nlattr *start;
start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_VXLAN);
@@ -622,7 +622,7 @@ static int tunnel_key_vxlan_opts_dump(struct sk_buff *skb,
static int tunnel_key_erspan_opts_dump(struct sk_buff *skb,
const struct ip_tunnel_info *info)
{
- struct erspan_metadata *md = (struct erspan_metadata *)(info + 1);
+ const struct erspan_metadata *md = ip_tunnel_info_opts(info);
struct nlattr *start;
start = nla_nest_start_noflag(skb, TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN);
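
The tunnel_key dump hunks above replace `(struct foo *)(info + 1)` casts with ip_tunnel_info_opts(), which names the fact that tunnel option metadata sits directly after struct ip_tunnel_info and lets the dump paths keep const-correct pointers. Conceptually the helper is (a simplified sketch, not its verbatim definition):

	static inline void *ip_tunnel_info_opts(struct ip_tunnel_info *info)
	{
		/* Option metadata (geneve/vxlan/erspan) is laid out right
		 * after the info struct in the same allocation. */
		return info + 1;
	}
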
diff --git a/net/sched/bpf_qdisc.c b/net/sched/bpf_qdisc.c
new file mode 100644
index 000000000000..7ea8b54b2ab1
--- /dev/null
+++ b/net/sched/bpf_qdisc.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/bpf_verifier.h>
+#include <linux/bpf.h>
+#include <linux/btf.h>
+#include <linux/filter.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+#define QDISC_OP_IDX(op) (offsetof(struct Qdisc_ops, op) / sizeof(void (*)(void)))
+#define QDISC_MOFF_IDX(moff) (moff / sizeof(void (*)(void)))
+
+static struct bpf_struct_ops bpf_Qdisc_ops;
+
+struct bpf_sched_data {
+ struct qdisc_watchdog watchdog;
+};
+
+struct bpf_sk_buff_ptr {
+ struct sk_buff *skb;
+};
+
+static int bpf_qdisc_init(struct btf *btf)
+{
+ return 0;
+}
+
+BTF_ID_LIST_SINGLE(bpf_qdisc_ids, struct, Qdisc)
+BTF_ID_LIST_SINGLE(bpf_sk_buff_ids, struct, sk_buff)
+BTF_ID_LIST_SINGLE(bpf_sk_buff_ptr_ids, struct, bpf_sk_buff_ptr)
+
+static bool bpf_qdisc_is_valid_access(int off, int size,
+ enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info)
+{
+ struct btf *btf = prog->aux->attach_btf;
+ u32 arg;
+
+ arg = btf_ctx_arg_idx(btf, prog->aux->attach_func_proto, off);
+ if (prog->aux->attach_st_ops_member_off == offsetof(struct Qdisc_ops, enqueue)) {
+ if (arg == 2 && type == BPF_READ) {
+ info->reg_type = PTR_TO_BTF_ID | PTR_TRUSTED;
+ info->btf = btf;
+ info->btf_id = bpf_sk_buff_ptr_ids[0];
+ return true;
+ }
+ }
+
+ return bpf_tracing_btf_ctx_access(off, size, type, prog, info);
+}
+
+static int bpf_qdisc_qdisc_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, size_t *end)
+{
+ switch (off) {
+ case offsetof(struct Qdisc, limit):
+ *end = offsetofend(struct Qdisc, limit);
+ break;
+ case offsetof(struct Qdisc, q) + offsetof(struct qdisc_skb_head, qlen):
+ *end = offsetof(struct Qdisc, q) + offsetofend(struct qdisc_skb_head, qlen);
+ break;
+ case offsetof(struct Qdisc, qstats) ... offsetofend(struct Qdisc, qstats) - 1:
+ *end = offsetofend(struct Qdisc, qstats);
+ break;
+ default:
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int bpf_qdisc_sk_buff_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, size_t *end)
+{
+ switch (off) {
+ case offsetof(struct sk_buff, tstamp):
+ *end = offsetofend(struct sk_buff, tstamp);
+ break;
+ case offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb, data[0]) ...
+ offsetof(struct sk_buff, cb) + offsetof(struct qdisc_skb_cb,
+ data[QDISC_CB_PRIV_LEN - 1]):
+ *end = offsetof(struct sk_buff, cb) +
+ offsetofend(struct qdisc_skb_cb, data[QDISC_CB_PRIV_LEN - 1]);
+ break;
+ default:
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+static int bpf_qdisc_btf_struct_access(struct bpf_verifier_log *log,
+ const struct bpf_reg_state *reg,
+ int off, int size)
+{
+ const struct btf_type *t, *skbt, *qdisct;
+ size_t end;
+ int err;
+
+ skbt = btf_type_by_id(reg->btf, bpf_sk_buff_ids[0]);
+ qdisct = btf_type_by_id(reg->btf, bpf_qdisc_ids[0]);
+ t = btf_type_by_id(reg->btf, reg->btf_id);
+
+ if (t == skbt) {
+ err = bpf_qdisc_sk_buff_access(log, reg, off, &end);
+ } else if (t == qdisct) {
+ err = bpf_qdisc_qdisc_access(log, reg, off, &end);
+ } else {
+ bpf_log(log, "only read is supported\n");
+ return -EACCES;
+ }
+
+ if (err) {
+ bpf_log(log, "no write support to %s at off %d\n",
+ btf_name_by_offset(reg->btf, t->name_off), off);
+ return -EACCES;
+ }
+
+ if (off + size > end) {
+ bpf_log(log,
+ "write access at off %d with size %d beyond the member of %s ended at %zu\n",
+ off, size, btf_name_by_offset(reg->btf, t->name_off), end);
+ return -EACCES;
+ }
+
+ return 0;
+}
+
+BTF_ID_LIST(bpf_qdisc_init_prologue_ids)
+BTF_ID(func, bpf_qdisc_init_prologue)
+
+static int bpf_qdisc_gen_prologue(struct bpf_insn *insn_buf, bool direct_write,
+ const struct bpf_prog *prog)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, init))
+ return 0;
+
+ /* r6 = r1; // r6 will be "u64 *ctx". r1 is "u64 *ctx".
+ * r2 = r1[16]; // r2 will be "struct netlink_ext_ack *extack"
+ * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
+ * r0 = bpf_qdisc_init_prologue(r1, r2);
+ * if r0 == 0 goto pc+1;
+ * BPF_EXIT;
+ * r1 = r6; // r1 will be "u64 *ctx".
+ */
+ *insn++ = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1, 16);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_init_prologue_ids[0]);
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1);
+ *insn++ = BPF_EXIT_INSN();
+ *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_6);
+ *insn++ = prog->insnsi[0];
+
+ return insn - insn_buf;
+}
+
+BTF_ID_LIST(bpf_qdisc_reset_destroy_epilogue_ids)
+BTF_ID(func, bpf_qdisc_reset_destroy_epilogue)
+
+static int bpf_qdisc_gen_epilogue(struct bpf_insn *insn_buf, const struct bpf_prog *prog,
+ s16 ctx_stack_off)
+{
+ struct bpf_insn *insn = insn_buf;
+
+ if (prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, reset) &&
+ prog->aux->attach_st_ops_member_off != offsetof(struct Qdisc_ops, destroy))
+ return 0;
+
+ /* r1 = stack[ctx_stack_off]; // r1 will be "u64 *ctx"
+ * r1 = r1[0]; // r1 will be "struct Qdisc *sch"
+ * r0 = bpf_qdisc_reset_destroy_epilogue(r1);
+ * BPF_EXIT;
+ */
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_FP, ctx_stack_off);
+ *insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0);
+ *insn++ = BPF_CALL_KFUNC(0, bpf_qdisc_reset_destroy_epilogue_ids[0]);
+ *insn++ = BPF_EXIT_INSN();
+
+ return insn - insn_buf;
+}
+
+__bpf_kfunc_start_defs();
+
+/* bpf_skb_get_hash - Get the flow hash of an skb.
+ * @skb: The skb to get the flow hash from.
+ */
+__bpf_kfunc u32 bpf_skb_get_hash(struct sk_buff *skb)
+{
+ return skb_get_hash(skb);
+}
+
+/* bpf_kfree_skb - Release an skb's reference and drop it immediately.
+ * @skb: The skb whose reference to be released and dropped.
+ */
+__bpf_kfunc void bpf_kfree_skb(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+/* bpf_qdisc_skb_drop - Drop an skb by adding it to a deferred free list.
+ * @skb: The skb whose reference to be released and dropped.
+ * @to_free_list: The list of skbs to be dropped.
+ */
+__bpf_kfunc void bpf_qdisc_skb_drop(struct sk_buff *skb,
+ struct bpf_sk_buff_ptr *to_free_list)
+{
+ __qdisc_drop(skb, (struct sk_buff **)to_free_list);
+}
+
+/* bpf_qdisc_watchdog_schedule - Schedule a qdisc to a later time using a timer.
+ * @sch: The qdisc to be scheduled.
+ * @expire: The expiry time of the timer.
+ * @delta_ns: The slack range of the timer.
+ */
+__bpf_kfunc void bpf_qdisc_watchdog_schedule(struct Qdisc *sch, u64 expire, u64 delta_ns)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_schedule_range_ns(&q->watchdog, expire, delta_ns);
+}
+
+/* bpf_qdisc_init_prologue - Hidden kfunc called in prologue of .init. */
+__bpf_kfunc int bpf_qdisc_init_prologue(struct Qdisc *sch,
+ struct netlink_ext_ack *extack)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+ struct net_device *dev = qdisc_dev(sch);
+ struct Qdisc *p;
+
+ qdisc_watchdog_init(&q->watchdog, sch);
+
+ if (sch->parent != TC_H_ROOT) {
+ /* If qdisc_lookup() returns NULL, it means .init is called by
+ * qdisc_create_dflt() in mq/mqprio_init and the parent qdisc
+ * has not been added to qdisc_hash yet.
+ */
+ p = qdisc_lookup(dev, TC_H_MAJ(sch->parent));
+ if (p && !(p->flags & TCQ_F_MQROOT)) {
+ NL_SET_ERR_MSG(extack, "BPF qdisc only supported on root or mq");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* bpf_qdisc_reset_destroy_epilogue - Hidden kfunc called in epilogue of .reset
+ * and .destroy
+ */
+__bpf_kfunc void bpf_qdisc_reset_destroy_epilogue(struct Qdisc *sch)
+{
+ struct bpf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_watchdog_cancel(&q->watchdog);
+}
+
+/* bpf_qdisc_bstats_update - Update Qdisc basic statistics
+ * @sch: The qdisc from which an skb is dequeued.
+ * @skb: The skb to be dequeued.
+ */
+__bpf_kfunc void bpf_qdisc_bstats_update(struct Qdisc *sch, const struct sk_buff *skb)
+{
+ bstats_update(&sch->bstats, skb);
+}
+
+__bpf_kfunc_end_defs();
+
+BTF_KFUNCS_START(qdisc_kfunc_ids)
+BTF_ID_FLAGS(func, bpf_skb_get_hash, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_kfree_skb, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_qdisc_skb_drop, KF_RELEASE)
+BTF_ID_FLAGS(func, bpf_dynptr_from_skb, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_watchdog_schedule, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_init_prologue, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_reset_destroy_epilogue, KF_TRUSTED_ARGS)
+BTF_ID_FLAGS(func, bpf_qdisc_bstats_update, KF_TRUSTED_ARGS)
+BTF_KFUNCS_END(qdisc_kfunc_ids)
+
+BTF_SET_START(qdisc_common_kfunc_set)
+BTF_ID(func, bpf_skb_get_hash)
+BTF_ID(func, bpf_kfree_skb)
+BTF_ID(func, bpf_dynptr_from_skb)
+BTF_SET_END(qdisc_common_kfunc_set)
+
+BTF_SET_START(qdisc_enqueue_kfunc_set)
+BTF_ID(func, bpf_qdisc_skb_drop)
+BTF_ID(func, bpf_qdisc_watchdog_schedule)
+BTF_SET_END(qdisc_enqueue_kfunc_set)
+
+BTF_SET_START(qdisc_dequeue_kfunc_set)
+BTF_ID(func, bpf_qdisc_watchdog_schedule)
+BTF_ID(func, bpf_qdisc_bstats_update)
+BTF_SET_END(qdisc_dequeue_kfunc_set)
+
+enum qdisc_ops_kf_flags {
+ QDISC_OPS_KF_COMMON = 0,
+ QDISC_OPS_KF_ENQUEUE = 1 << 0,
+ QDISC_OPS_KF_DEQUEUE = 1 << 1,
+};
+
+static const u32 qdisc_ops_context_flags[] = {
+ [QDISC_OP_IDX(enqueue)] = QDISC_OPS_KF_ENQUEUE,
+ [QDISC_OP_IDX(dequeue)] = QDISC_OPS_KF_DEQUEUE,
+ [QDISC_OP_IDX(init)] = QDISC_OPS_KF_COMMON,
+ [QDISC_OP_IDX(reset)] = QDISC_OPS_KF_COMMON,
+ [QDISC_OP_IDX(destroy)] = QDISC_OPS_KF_COMMON,
+};
+
+static int bpf_qdisc_kfunc_filter(const struct bpf_prog *prog, u32 kfunc_id)
+{
+ u32 moff, flags;
+
+ if (!btf_id_set8_contains(&qdisc_kfunc_ids, kfunc_id))
+ return 0;
+
+ if (prog->aux->st_ops != &bpf_Qdisc_ops)
+ return -EACCES;
+
+ moff = prog->aux->attach_st_ops_member_off;
+ flags = qdisc_ops_context_flags[QDISC_MOFF_IDX(moff)];
+
+ if ((flags & QDISC_OPS_KF_ENQUEUE) &&
+ btf_id_set_contains(&qdisc_enqueue_kfunc_set, kfunc_id))
+ return 0;
+
+ if ((flags & QDISC_OPS_KF_DEQUEUE) &&
+ btf_id_set_contains(&qdisc_dequeue_kfunc_set, kfunc_id))
+ return 0;
+
+ if (btf_id_set_contains(&qdisc_common_kfunc_set, kfunc_id))
+ return 0;
+
+ return -EACCES;
+}
+
+static const struct btf_kfunc_id_set bpf_qdisc_kfunc_set = {
+ .owner = THIS_MODULE,
+ .set = &qdisc_kfunc_ids,
+ .filter = bpf_qdisc_kfunc_filter,
+};
+
+static const struct bpf_verifier_ops bpf_qdisc_verifier_ops = {
+ .get_func_proto = bpf_base_func_proto,
+ .is_valid_access = bpf_qdisc_is_valid_access,
+ .btf_struct_access = bpf_qdisc_btf_struct_access,
+ .gen_prologue = bpf_qdisc_gen_prologue,
+ .gen_epilogue = bpf_qdisc_gen_epilogue,
+};
+
+static int bpf_qdisc_init_member(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata)
+{
+ const struct Qdisc_ops *uqdisc_ops;
+ struct Qdisc_ops *qdisc_ops;
+ u32 moff;
+
+ uqdisc_ops = (const struct Qdisc_ops *)udata;
+ qdisc_ops = (struct Qdisc_ops *)kdata;
+
+ moff = __btf_member_bit_offset(t, member) / 8;
+ switch (moff) {
+ case offsetof(struct Qdisc_ops, priv_size):
+ if (uqdisc_ops->priv_size)
+ return -EINVAL;
+ qdisc_ops->priv_size = sizeof(struct bpf_sched_data);
+ return 1;
+ case offsetof(struct Qdisc_ops, peek):
+ qdisc_ops->peek = qdisc_peek_dequeued;
+ return 0;
+ case offsetof(struct Qdisc_ops, id):
+ if (bpf_obj_name_cpy(qdisc_ops->id, uqdisc_ops->id,
+ sizeof(qdisc_ops->id)) <= 0)
+ return -EINVAL;
+ return 1;
+ }
+
+ return 0;
+}
+
+static int bpf_qdisc_reg(void *kdata, struct bpf_link *link)
+{
+ return register_qdisc(kdata);
+}
+
+static void bpf_qdisc_unreg(void *kdata, struct bpf_link *link)
+{
+ return unregister_qdisc(kdata);
+}
+
+static int bpf_qdisc_validate(void *kdata)
+{
+ struct Qdisc_ops *ops = (struct Qdisc_ops *)kdata;
+
+ if (!ops->enqueue || !ops->dequeue || !ops->init ||
+ !ops->reset || !ops->destroy)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int Qdisc_ops__enqueue(struct sk_buff *skb__ref, struct Qdisc *sch,
+ struct sk_buff **to_free)
+{
+ return 0;
+}
+
+static struct sk_buff *Qdisc_ops__dequeue(struct Qdisc *sch)
+{
+ return NULL;
+}
+
+static int Qdisc_ops__init(struct Qdisc *sch, struct nlattr *arg,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static void Qdisc_ops__reset(struct Qdisc *sch)
+{
+}
+
+static void Qdisc_ops__destroy(struct Qdisc *sch)
+{
+}
+
+static struct Qdisc_ops __bpf_ops_qdisc_ops = {
+ .enqueue = Qdisc_ops__enqueue,
+ .dequeue = Qdisc_ops__dequeue,
+ .init = Qdisc_ops__init,
+ .reset = Qdisc_ops__reset,
+ .destroy = Qdisc_ops__destroy,
+};
+
+static struct bpf_struct_ops bpf_Qdisc_ops = {
+ .verifier_ops = &bpf_qdisc_verifier_ops,
+ .reg = bpf_qdisc_reg,
+ .unreg = bpf_qdisc_unreg,
+ .validate = bpf_qdisc_validate,
+ .init_member = bpf_qdisc_init_member,
+ .init = bpf_qdisc_init,
+ .name = "Qdisc_ops",
+ .cfi_stubs = &__bpf_ops_qdisc_ops,
+ .owner = THIS_MODULE,
+};
+
+BTF_ID_LIST(bpf_sk_buff_dtor_ids)
+BTF_ID(func, bpf_kfree_skb)
+
+static int __init bpf_qdisc_kfunc_init(void)
+{
+ int ret;
+ const struct btf_id_dtor_kfunc skb_kfunc_dtors[] = {
+ {
+ .btf_id = bpf_sk_buff_ids[0],
+ .kfunc_btf_id = bpf_sk_buff_dtor_ids[0]
+ },
+ };
+
+ ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_qdisc_kfunc_set);
+ ret = ret ?: register_btf_id_dtor_kfuncs(skb_kfunc_dtors,
+ ARRAY_SIZE(skb_kfunc_dtors),
+ THIS_MODULE);
+ ret = ret ?: register_bpf_struct_ops(&bpf_Qdisc_ops, Qdisc_ops);
+
+ return ret;
+}
+late_initcall(bpf_qdisc_kfunc_init);
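
With this file in place, a scheduler can be written entirely in BPF by filling the supported Qdisc_ops operations and loading them as a struct_ops map. Below is a condensed sketch of a FIFO built this way, modeled on the kernel selftests; the names, the bpf_experimental.h list plumbing, and the elided bookkeeping are illustrative, not a drop-in implementation:

	// SPDX-License-Identifier: GPL-2.0
	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>
	#include "bpf_experimental.h"	/* private(), __contains, container_of */

	char _license[] SEC("license") = "GPL";

	struct skb_node {
		struct sk_buff __kptr *skb;	/* kfree_skb dtor reclaims leftovers */
		struct bpf_list_node node;
	};

	private(A) struct bpf_spin_lock fifo_lock;
	private(A) struct bpf_list_head fifo __contains(skb_node, node);

	SEC("struct_ops/bpf_fifo_enqueue")
	int BPF_PROG(bpf_fifo_enqueue, struct sk_buff *skb, struct Qdisc *sch,
		     struct bpf_sk_buff_ptr *to_free)
	{
		struct skb_node *skbn;

		if (sch->q.qlen == sch->limit)
			goto drop;

		skbn = bpf_obj_new(typeof(*skbn));
		if (!skbn)
			goto drop;

		sch->q.qlen++;
		skb = bpf_kptr_xchg(&skbn->skb, skb);
		if (skb)	/* cannot happen for a fresh node; satisfies the verifier */
			bpf_qdisc_skb_drop(skb, to_free);

		bpf_spin_lock(&fifo_lock);
		bpf_list_push_back(&fifo, &skbn->node);
		bpf_spin_unlock(&fifo_lock);
		return NET_XMIT_SUCCESS;
	drop:
		bpf_qdisc_skb_drop(skb, to_free);
		return NET_XMIT_DROP;
	}

	SEC("struct_ops/bpf_fifo_dequeue")
	struct sk_buff *BPF_PROG(bpf_fifo_dequeue, struct Qdisc *sch)
	{
		struct sk_buff *skb = NULL;
		struct bpf_list_node *node;
		struct skb_node *skbn;

		bpf_spin_lock(&fifo_lock);
		node = bpf_list_pop_front(&fifo);
		bpf_spin_unlock(&fifo_lock);
		if (!node)
			return NULL;

		skbn = container_of(node, struct skb_node, node);
		skb = bpf_kptr_xchg(&skbn->skb, skb);
		bpf_obj_drop(skbn);
		if (skb) {
			bpf_qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
		}
		return skb;
	}

	SEC("struct_ops/bpf_fifo_init")
	int BPF_PROG(bpf_fifo_init, struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
	{
		sch->limit = 1000;
		return 0;
	}

	SEC("struct_ops/bpf_fifo_reset")
	void BPF_PROG(bpf_fifo_reset, struct Qdisc *sch)
	{
		/* Sketch only: a real reset drains the list and frees each skb
		 * (see the selftests); the __kptr destructor reclaims anything
		 * still queued when the map is torn down. */
		sch->q.qlen = 0;
	}

	SEC("struct_ops/bpf_fifo_destroy")
	void BPF_PROG(bpf_fifo_destroy, struct Qdisc *sch)
	{
	}

	SEC(".struct_ops")
	struct Qdisc_ops fifo = {
		.enqueue = (void *)bpf_fifo_enqueue,
		.dequeue = (void *)bpf_fifo_dequeue,
		.init    = (void *)bpf_fifo_init,
		.reset   = (void *)bpf_fifo_reset,	/* all five required by */
		.destroy = (void *)bpf_fifo_destroy,	/* bpf_qdisc_validate() */
		.id      = "bpf_fifo",
	};

Once loaded, the qdisc attaches like any other, subject to the root/mq restriction enforced by bpf_qdisc_init_prologue() above.
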
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 17d97bbe890f..ecec0a1e1c1a 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -97,7 +97,7 @@ tcf_exts_miss_cookie_base_alloc(struct tcf_exts *exts, struct tcf_proto *tp,
err = xa_alloc_cyclic(&tcf_exts_miss_cookies_xa, &n->miss_cookie_base,
n, xa_limit_32b, &next, GFP_KERNEL);
- if (err)
+ if (err < 0)
goto err_xa_alloc;
exts->miss_cookie_node = n;
@@ -390,6 +390,7 @@ static struct tcf_proto *tcf_proto_create(const char *kind, u32 protocol,
tp->protocol = protocol;
tp->prio = prio;
tp->chain = chain;
+ tp->usesw = !tp->ops->reoffload;
spin_lock_init(&tp->lock);
refcount_set(&tp->refcnt, 1);
@@ -410,39 +411,31 @@ static void tcf_proto_get(struct tcf_proto *tp)
refcount_inc(&tp->refcnt);
}
-static void tcf_maintain_bypass(struct tcf_block *block)
+static void tcf_proto_count_usesw(struct tcf_proto *tp, bool add)
{
- int filtercnt = atomic_read(&block->filtercnt);
- int skipswcnt = atomic_read(&block->skipswcnt);
- bool bypass_wanted = filtercnt > 0 && filtercnt == skipswcnt;
-
- if (bypass_wanted != block->bypass_wanted) {
#ifdef CONFIG_NET_CLS_ACT
- if (bypass_wanted)
- static_branch_inc(&tcf_bypass_check_needed_key);
- else
- static_branch_dec(&tcf_bypass_check_needed_key);
-#endif
- block->bypass_wanted = bypass_wanted;
+ struct tcf_block *block = tp->chain->block;
+ bool counted = false;
+
+ if (!add) {
+ if (tp->usesw && tp->counted) {
+ if (!atomic_dec_return(&block->useswcnt))
+ static_branch_dec(&tcf_sw_enabled_key);
+ tp->counted = false;
+ }
+ return;
}
-}
-static void tcf_block_filter_cnt_update(struct tcf_block *block, bool *counted, bool add)
-{
- lockdep_assert_not_held(&block->cb_lock);
-
- down_write(&block->cb_lock);
- if (*counted != add) {
- if (add) {
- atomic_inc(&block->filtercnt);
- *counted = true;
- } else {
- atomic_dec(&block->filtercnt);
- *counted = false;
- }
+ spin_lock(&tp->lock);
+ if (tp->usesw && !tp->counted) {
+ counted = true;
+ tp->counted = true;
}
- tcf_maintain_bypass(block);
- up_write(&block->cb_lock);
+ spin_unlock(&tp->lock);
+
+ if (counted && atomic_inc_return(&block->useswcnt) == 1)
+ static_branch_inc(&tcf_sw_enabled_key);
+#endif
}
static void tcf_chain_put(struct tcf_chain *chain);
@@ -451,7 +444,7 @@ static void tcf_proto_destroy(struct tcf_proto *tp, bool rtnl_held,
bool sig_destroy, struct netlink_ext_ack *extack)
{
tp->ops->destroy(tp, rtnl_held, extack);
- tcf_block_filter_cnt_update(tp->chain->block, &tp->counted, false);
+ tcf_proto_count_usesw(tp, false);
if (sig_destroy)
tcf_proto_signal_destroyed(tp->chain, tp);
tcf_chain_put(tp->chain);
@@ -1518,6 +1511,7 @@ int tcf_block_get_ext(struct tcf_block **p_block, struct Qdisc *q,
return 0;
err_dev_insert:
+ tcf_block_offload_unbind(block, q, ei);
err_block_offload_bind:
tcf_chain0_head_change_cb_del(block, ei);
err_chain0_head_change_cb_add:
@@ -1932,7 +1926,8 @@ static void tcf_chain_tp_remove(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate);
+ bool prio_allocate,
+ struct netlink_ext_ack *extack);
/* Try to insert new proto.
* If proto with specified priority already exists, free new proto
@@ -1956,8 +1951,7 @@ static struct tcf_proto *tcf_chain_tp_insert_unique(struct tcf_chain *chain,
return ERR_PTR(-EAGAIN);
}
- tp = tcf_chain_tp_find(chain, &chain_info,
- protocol, prio, false);
+ tp = tcf_chain_tp_find(chain, &chain_info, protocol, prio, false, NULL);
if (!tp)
err = tcf_chain_tp_insert(chain, &chain_info, tp_new);
mutex_unlock(&chain->filter_chain_lock);
@@ -2017,7 +2011,8 @@ static void tcf_chain_tp_delete_empty(struct tcf_chain *chain,
static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
struct tcf_chain_info *chain_info,
u32 protocol, u32 prio,
- bool prio_allocate)
+ bool prio_allocate,
+ struct netlink_ext_ack *extack)
{
struct tcf_proto **pprev;
struct tcf_proto *tp;
@@ -2028,9 +2023,14 @@ static struct tcf_proto *tcf_chain_tp_find(struct tcf_chain *chain,
pprev = &tp->next) {
if (tp->prio >= prio) {
if (tp->prio == prio) {
- if (prio_allocate ||
- (tp->protocol != protocol && protocol))
+ if (prio_allocate) {
+ NL_SET_ERR_MSG(extack, "Lowest ID from auto-alloc range already in use");
+ return ERR_PTR(-ENOSPC);
+ }
+ if (tp->protocol != protocol && protocol) {
+ NL_SET_ERR_MSG(extack, "Protocol mismatch for filter with specified priority");
return ERR_PTR(-EINVAL);
+ }
} else {
tp = NULL;
}
@@ -2057,6 +2057,7 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
struct tcmsg *tcm;
struct nlmsghdr *nlh;
unsigned char *b = skb_tail_pointer(skb);
+ int ret = -EMSGSIZE;
nlh = nlmsg_put(skb, portid, seq, event, sizeof(*tcm), flags);
if (!nlh)
@@ -2101,11 +2102,45 @@ static int tcf_fill_node(struct net *net, struct sk_buff *skb,
return skb->len;
+cls_op_not_supp:
+ ret = -EOPNOTSUPP;
out_nlmsg_trim:
nla_put_failure:
-cls_op_not_supp:
nlmsg_trim(skb, b);
- return -1;
+ return ret;
+}
+
+static struct sk_buff *tfilter_notify_prep(struct net *net,
+ struct sk_buff *oskb,
+ struct nlmsghdr *n,
+ struct tcf_proto *tp,
+ struct tcf_block *block,
+ struct Qdisc *q, u32 parent,
+ void *fh, int event,
+ u32 portid, bool rtnl_held,
+ struct netlink_ext_ack *extack)
+{
+ unsigned int size = oskb ? max(NLMSG_GOODSIZE, oskb->len) : NLMSG_GOODSIZE;
+ struct sk_buff *skb;
+ int ret;
+
+retry:
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return ERR_PTR(-ENOBUFS);
+
+ ret = tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
+ n->nlmsg_seq, n->nlmsg_flags, event, false,
+ rtnl_held, extack);
+ if (ret <= 0) {
+ kfree_skb(skb);
+ if (ret == -EMSGSIZE) {
+ size += NLMSG_GOODSIZE;
+ goto retry;
+ }
+ return ERR_PTR(-EINVAL);
+ }
+ return skb;
}
static int tfilter_notify(struct net *net, struct sk_buff *oskb,
@@ -2121,16 +2156,10 @@ static int tfilter_notify(struct net *net, struct sk_buff *oskb,
if (!unicast && !rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return 0;
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, event,
- false, rtnl_held, extack) <= 0) {
- kfree_skb(skb);
- return -EINVAL;
- }
+ skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh, event,
+ portid, rtnl_held, extack);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
if (unicast)
err = rtnl_unicast(skb, net, portid);
@@ -2153,16 +2182,11 @@ static int tfilter_del_notify(struct net *net, struct sk_buff *oskb,
if (!rtnl_notify_needed(net, n->nlmsg_flags, RTNLGRP_TC))
return tp->ops->delete(tp, fh, last, rtnl_held, extack);
- skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
- if (!skb)
- return -ENOBUFS;
-
- if (tcf_fill_node(net, skb, tp, block, q, parent, fh, portid,
- n->nlmsg_seq, n->nlmsg_flags, RTM_DELTFILTER,
- false, rtnl_held, extack) <= 0) {
+ skb = tfilter_notify_prep(net, oskb, n, tp, block, q, parent, fh,
+ RTM_DELTFILTER, portid, rtnl_held, extack);
+ if (IS_ERR(skb)) {
NL_SET_ERR_MSG(extack, "Failed to build del event notification");
- kfree_skb(skb);
- return -EINVAL;
+ return PTR_ERR(skb);
}
err = tp->ops->delete(tp, fh, last, rtnl_held, extack);
@@ -2296,7 +2320,7 @@ replay:
}
block->classid = parent;
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2311,9 +2335,8 @@ replay:
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, prio_allocate);
+ prio, prio_allocate, extack);
if (IS_ERR(tp)) {
- NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
err = PTR_ERR(tp);
goto errout_locked;
}
@@ -2403,7 +2426,7 @@ replay:
tfilter_notify(net, skb, n, tp, block, q, parent, fh,
RTM_NEWTFILTER, false, rtnl_held, extack);
tfilter_put(tp, fh);
- tcf_block_filter_cnt_update(block, &tp->counted, true);
+ tcf_proto_count_usesw(tp, true);
/* q pointer is NULL for shared blocks */
if (q)
q->flags &= ~TCQ_F_CAN_BYPASS;
@@ -2508,7 +2531,7 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2538,10 +2561,13 @@ static int tc_del_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
- if (!tp || IS_ERR(tp)) {
+ prio, false, extack);
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout_locked;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout_locked;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -2663,7 +2689,7 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
goto errout;
}
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -2678,11 +2704,14 @@ static int tc_get_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
mutex_lock(&chain->filter_chain_lock);
tp = tcf_chain_tp_find(chain, &chain_info, protocol,
- prio, false);
+ prio, false, extack);
mutex_unlock(&chain->filter_chain_lock);
- if (!tp || IS_ERR(tp)) {
+ if (!tp) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Filter with specified priority/protocol not found");
- err = tp ? PTR_ERR(tp) : -ENOENT;
+ goto errout;
+ } else if (IS_ERR(tp)) {
+ err = PTR_ERR(tp);
goto errout;
} else if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], tp->ops->kind)) {
NL_SET_ERR_MSG(extack, "Specified filter kind does not match existing one");
@@ -3103,7 +3132,7 @@ replay:
if (IS_ERR(block))
return PTR_ERR(block);
- chain_index = tca[TCA_CHAIN] ? nla_get_u32(tca[TCA_CHAIN]) : 0;
+ chain_index = nla_get_u32_default(tca[TCA_CHAIN], 0);
if (chain_index > TC_ACT_EXT_VAL_MASK) {
NL_SET_ERR_MSG(extack, "Specified chain index exceeds upper limit");
err = -EINVAL;
@@ -3520,8 +3549,6 @@ static void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
if (*flags & TCA_CLS_FLAGS_IN_HW)
return;
*flags |= TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_inc(&block->skipswcnt);
atomic_inc(&block->offloadcnt);
}
@@ -3530,8 +3557,6 @@ static void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
if (!(*flags & TCA_CLS_FLAGS_IN_HW))
return;
*flags &= ~TCA_CLS_FLAGS_IN_HW;
- if (tc_skip_sw(*flags))
- atomic_dec(&block->skipswcnt);
atomic_dec(&block->offloadcnt);
}
@@ -4055,6 +4080,19 @@ static struct pernet_operations tcf_net_ops = {
.size = sizeof(struct tcf_net),
};
+static const struct rtnl_msg_handler tc_filter_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWTFILTER, .doit = tc_new_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_DELTFILTER, .doit = tc_del_tfilter,
+ .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_GETTFILTER, .doit = tc_get_tfilter,
+ .dumpit = tc_dump_tfilter, .flags = RTNL_FLAG_DOIT_UNLOCKED},
+ {.msgtype = RTM_NEWCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_DELCHAIN, .doit = tc_ctl_chain},
+ {.msgtype = RTM_GETCHAIN, .doit = tc_ctl_chain,
+ .dumpit = tc_dump_chain},
+};
+
static int __init tc_filter_init(void)
{
int err;
@@ -4068,17 +4106,7 @@ static int __init tc_filter_init(void)
goto err_register_pernet_subsys;
xa_init_flags(&tcf_exts_miss_cookies_xa, XA_FLAGS_ALLOC1);
-
- rtnl_register(PF_UNSPEC, RTM_NEWTFILTER, tc_new_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_DELTFILTER, tc_del_tfilter, NULL,
- RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_GETTFILTER, tc_get_tfilter,
- tc_dump_tfilter, RTNL_FLAG_DOIT_UNLOCKED);
- rtnl_register(PF_UNSPEC, RTM_NEWCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELCHAIN, tc_ctl_chain, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETCHAIN, tc_ctl_chain,
- tc_dump_chain, 0);
+ rtnl_register_many(tc_filter_rtnl_msg_handlers);
return 0;
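
The filtercnt/skipswcnt pair is replaced here by a single per-block useswcnt, driven from each classifier's ->change() path via tcf_proto_update_usesw() (the cls_bpf, cls_flower, cls_matchall and cls_u32 hunks below add the calls). That companion helper lives in include/net/pkt_cls.h; a simplified sketch of its logic:

	static inline void tcf_proto_update_usesw(struct tcf_proto *tp, u32 flags)
	{
		if (tp->usesw)
			return;
		/* Only a filter that is both skip_sw and actually in hardware
		 * can avoid the software datapath; anything else marks the
		 * proto as needing software classification. */
		if (tc_skip_sw(flags) && tc_in_hw(flags))
			return;
		tp->usesw = true;
	}

tcf_proto_count_usesw() then folds each proto's flag into the block counter and the tcf_sw_enabled_key static branch.
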
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 1941ebec23ff..7fbe42f0e5c2 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -509,6 +509,8 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(prog->gen_flags))
prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, prog->gen_flags);
+
if (oldprog) {
idr_replace(&head->handle_idr, prog, handle);
list_replace_rcu(&oldprog->link, &prog->link);
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 5502998aace7..5693b41b093f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -345,7 +345,7 @@ TC_INDIRECT_SCOPE int flow_classify(struct sk_buff *skb,
static void flow_perturbation(struct timer_list *t)
{
- struct flow_filter *f = from_timer(f, t, perturb_timer);
+ struct flow_filter *f = timer_container_of(f, t, perturb_timer);
get_random_bytes(&f->hashrnd, 4);
if (f->perturb_period)
@@ -356,7 +356,8 @@ static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
[TCA_FLOW_KEYS] = { .type = NLA_U32 },
[TCA_FLOW_MODE] = { .type = NLA_U32 },
[TCA_FLOW_BASECLASS] = { .type = NLA_U32 },
- [TCA_FLOW_RSHIFT] = { .type = NLA_U32 },
+ [TCA_FLOW_RSHIFT] = NLA_POLICY_MAX(NLA_U32,
+ 31 /* BITS_PER_U32 - 1 */),
[TCA_FLOW_ADDEND] = { .type = NLA_U32 },
[TCA_FLOW_MASK] = { .type = NLA_U32 },
[TCA_FLOW_XOR] = { .type = NLA_U32 },
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index e280c27cb9f9..099ff6a3e1f5 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -766,7 +766,7 @@ geneve_opt_policy[TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX + 1] = {
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS] = { .type = NLA_U16 },
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA] = { .type = NLA_BINARY,
- .len = 128 },
+ .len = 127 },
};
static const struct nla_policy
@@ -1369,7 +1369,6 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
int err;
md = (struct erspan_metadata *)&key->enc_opts.data[key->enc_opts.len];
- memset(md, 0xff, sizeof(*md));
md->version = 1;
if (!depth)
@@ -1398,9 +1397,9 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option index");
return -EINVAL;
}
+ memset(&md->u.index, 0xff, sizeof(md->u.index));
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX];
- memset(&md->u, 0x00, sizeof(md->u));
md->u.index = nla_get_be32(nla);
}
} else if (md->version == 2) {
@@ -1409,10 +1408,12 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key,
NL_SET_ERR_MSG(extack, "Missing tunnel key erspan option dir or hwid");
return -EINVAL;
}
+ md->u.md2.dir = 1;
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR];
md->u.md2.dir = nla_get_u8(nla);
}
+ set_hwid(&md->u.md2, 0xff);
if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID]) {
nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID];
set_hwid(&md->u.md2, nla_get_u8(nla));
@@ -2502,6 +2503,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, fnew->flags);
+
spin_lock(&tp->lock);
/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 9f1e62ca508d..f03bf5da39ee 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -228,6 +228,8 @@ static int mall_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
*arg = head;
rcu_assign_pointer(tp->root, new);
return 0;
diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 9412d88a99bc..2a1c00048fd6 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -92,6 +92,16 @@ struct tc_u_common {
long knodes;
};
+static u32 handle2id(u32 h)
+{
+ return ((h & 0x80000000) ? ((h >> 20) & 0x7FF) : h);
+}
+
+static u32 id2handle(u32 id)
+{
+ return (id | 0x800U) << 20;
+}
+
static inline unsigned int u32_hash_fold(__be32 key,
const struct tc_u32_sel *sel,
u8 fshift)
@@ -310,7 +320,7 @@ static u32 gen_new_htid(struct tc_u_common *tp_c, struct tc_u_hnode *ptr)
int id = idr_alloc_cyclic(&tp_c->handle_idr, ptr, 1, 0x7FF, GFP_KERNEL);
if (id < 0)
return 0;
- return (id | 0x800U) << 20;
+ return id2handle(id);
}
static struct hlist_head *tc_u_common_hash;
@@ -360,7 +370,7 @@ static int u32_init(struct tcf_proto *tp)
return -ENOBUFS;
refcount_set(&root_ht->refcnt, 1);
- root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : 0x80000000;
+ root_ht->handle = tp_c ? gen_new_htid(tp_c, root_ht) : id2handle(0);
root_ht->prio = tp->prio;
root_ht->is_root = true;
idr_init(&root_ht->handle_idr);
@@ -612,7 +622,7 @@ static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht,
if (phn == ht) {
u32_clear_hw_hnode(tp, ht, extack);
idr_destroy(&ht->handle_idr);
- idr_remove(&tp_c->handle_idr, ht->handle);
+ idr_remove(&tp_c->handle_idr, handle2id(ht->handle));
RCU_INIT_POINTER(*hn, ht->next);
kfree_rcu(ht, rcu);
return 0;
@@ -941,6 +951,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(new->flags))
new->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, new->flags);
+
u32_replace_knode(tp, tp_c, new);
tcf_unbind_filter(tp, &n->res);
tcf_exts_get_net(&n->exts);
@@ -989,7 +1001,7 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
err = u32_replace_hw_hnode(tp, ht, userflags, extack);
if (err) {
- idr_remove(&tp_c->handle_idr, handle);
+ idr_remove(&tp_c->handle_idr, handle2id(handle));
kfree(ht);
return err;
}
@@ -1154,6 +1166,8 @@ static int u32_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(n->flags))
n->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ tcf_proto_update_usesw(tp, n->flags);
+
ins = &ht->ht[TC_U32_HASH(handle)];
for (pins = rtnl_dereference(*ins); pins;
ins = &pins->next, pins = rtnl_dereference(*ins))
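
handle2id() and id2handle() make explicit how an IDR id is packed into a u32 hash-table handle; the bug being fixed is that idr_remove() was previously passed the packed handle rather than the id it was allocated under. A worked round trip under that layout (illustrative):

	/* id2handle(): (id | 0x800) << 20, so IDR id 0 becomes 0x80000000
	 * (the classic root-ht handle) and id 0x7FE becomes 0xFFE00000.
	 * handle2id(): for handles with the top bit set, (h >> 20) & 0x7FF
	 * recovers the id, so handle2id(id2handle(id)) == id for id < 0x7FF. */
	u32 h  = id2handle(3);	/* 0x80300000 */
	u32 id = handle2id(h);	/* 3 */
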
diff --git a/net/sched/em_meta.c b/net/sched/em_meta.c
index 8996c73c9779..3f2e707a11d1 100644
--- a/net/sched/em_meta.c
+++ b/net/sched/em_meta.c
@@ -460,7 +460,7 @@ META_COLLECTOR(int_sk_fwd_alloc)
*err = -1;
return;
}
- dst->value = sk_forward_alloc_get(sk);
+ dst->value = READ_ONCE(sk->sk_forward_alloc);
}
META_COLLECTOR(int_sk_sndbuf)
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2eefa4783879..c5e3673aadbe 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -25,7 +25,9 @@
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <linux/hashtable.h>
+#include <linux/bpf.h>
+#include <net/netdev_lock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/netlink.h>
@@ -206,7 +208,7 @@ static struct Qdisc_ops *qdisc_lookup_default(const char *name)
for (q = qdisc_base; q; q = q->next) {
if (!strcmp(name, q->id)) {
- if (!try_module_get(q->owner))
+ if (!bpf_try_module_get(q, q->owner))
q = NULL;
break;
}
@@ -236,7 +238,7 @@ int qdisc_set_default(const char *name)
if (ops) {
/* Set new default */
- module_put(default_qdisc_ops->owner);
+ bpf_module_put(default_qdisc_ops, default_qdisc_ops->owner);
default_qdisc_ops = ops;
}
write_unlock(&qdisc_mod_lock);
@@ -358,7 +360,7 @@ static struct Qdisc_ops *qdisc_lookup_ops(struct nlattr *kind)
read_lock(&qdisc_mod_lock);
for (q = qdisc_base; q; q = q->next) {
if (nla_strcmp(kind, q->id) == 0) {
- if (!try_module_get(q->owner))
+ if (!bpf_try_module_get(q, q->owner))
q = NULL;
break;
}
@@ -619,8 +621,7 @@ static enum hrtimer_restart qdisc_watchdog(struct hrtimer *timer)
void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
clockid_t clockid)
{
- hrtimer_init(&wd->timer, clockid, HRTIMER_MODE_ABS_PINNED);
- wd->timer.function = qdisc_watchdog;
+ hrtimer_setup(&wd->timer, qdisc_watchdog, clockid, HRTIMER_MODE_ABS_PINNED);
wd->qdisc = qdisc;
}
EXPORT_SYMBOL(qdisc_watchdog_init_clockid);
@@ -791,7 +792,7 @@ void qdisc_tree_reduce_backlog(struct Qdisc *sch, int n, int len)
drops = max_t(int, n, 0);
rcu_read_lock();
while ((parentid = sch->parent)) {
- if (TC_H_MAJ(parentid) == TC_H_MAJ(TC_H_INGRESS))
+ if (parentid == TC_H_ROOT)
break;
if (sch->flags & TCQ_F_NOPARENT)
@@ -1267,36 +1268,8 @@ static struct Qdisc *qdisc_create(struct net_device *dev,
struct qdisc_size_table *stab;
ops = qdisc_lookup_ops(kind);
-#ifdef CONFIG_MODULES
- if (ops == NULL && kind != NULL) {
- char name[IFNAMSIZ];
- if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
- /* We dropped the RTNL semaphore in order to
- * perform the module load. So, even if we
- * succeeded in loading the module we have to
- * tell the caller to replay the request. We
- * indicate this using -EAGAIN.
- * We replay the request because the device may
- * go away in the mean time.
- */
- rtnl_unlock();
- request_module(NET_SCH_ALIAS_PREFIX "%s", name);
- rtnl_lock();
- ops = qdisc_lookup_ops(kind);
- if (ops != NULL) {
- /* We will try again qdisc_lookup_ops,
- * so don't keep a reference.
- */
- module_put(ops->owner);
- err = -EAGAIN;
- goto err_out;
- }
- }
- }
-#endif
-
- err = -ENOENT;
if (!ops) {
+ err = -ENOENT;
NL_SET_ERR_MSG(extack, "Specified qdisc kind is unknown");
goto err_out;
}
@@ -1398,7 +1371,7 @@ err_out3:
netdev_put(dev, &sch->dev_tracker);
qdisc_free(sch);
err_out2:
- module_put(ops->owner);
+ bpf_module_put(ops, ops->owner);
err_out:
*errp = err;
return NULL;
@@ -1505,27 +1478,18 @@ const struct nla_policy rtm_tca_policy[TCA_MAX + 1] = {
* Delete/get qdisc.
*/
-static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
- struct netlink_ext_ack *extack)
+static int __tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack,
+ struct net_device *dev,
+ struct nlattr *tca[TCA_MAX + 1],
+ struct tcmsg *tcm)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = nlmsg_data(n);
- struct nlattr *tca[TCA_MAX + 1];
- struct net_device *dev;
- u32 clid;
struct Qdisc *q = NULL;
struct Qdisc *p = NULL;
+ u32 clid;
int err;
- err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
- rtm_tca_policy, extack);
- if (err < 0)
- return err;
-
- dev = __dev_get_by_index(net, tcm->tcm_ifindex);
- if (!dev)
- return -ENODEV;
-
clid = tcm->tcm_parent;
if (clid) {
if (clid != TC_H_ROOT) {
@@ -1560,7 +1524,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
@@ -1582,6 +1546,31 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
return 0;
}
+static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tcmsg *tcm = nlmsg_data(n);
+ struct nlattr *tca[TCA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
+ rtm_tca_policy, extack);
+ if (err < 0)
+ return err;
+
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ netdev_lock_ops(dev);
+ err = __tc_get_qdisc(skb, n, extack, dev, tca, tcm);
+ netdev_unlock_ops(dev);
+
+ return err;
+}
+
static bool req_create_or_replace(struct nlmsghdr *n)
{
return (n->nlmsg_flags & NLM_F_CREATE &&
@@ -1601,35 +1590,18 @@ static bool req_change(struct nlmsghdr *n)
!(n->nlmsg_flags & NLM_F_EXCL));
}
-/*
- * Create/change qdisc.
- */
-static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
- struct netlink_ext_ack *extack)
+static int __tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack,
+ struct net_device *dev,
+ struct nlattr *tca[TCA_MAX + 1],
+ struct tcmsg *tcm)
{
- struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm;
- struct nlattr *tca[TCA_MAX + 1];
- struct net_device *dev;
+ struct Qdisc *q = NULL;
+ struct Qdisc *p = NULL;
u32 clid;
- struct Qdisc *q, *p;
int err;
-replay:
- /* Reinit, just in case something touches this. */
- err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
- rtm_tca_policy, extack);
- if (err < 0)
- return err;
-
- tcm = nlmsg_data(n);
clid = tcm->tcm_parent;
- q = p = NULL;
-
- dev = __dev_get_by_index(net, tcm->tcm_ifindex);
- if (!dev)
- return -ENODEV;
-
if (clid) {
if (clid != TC_H_ROOT) {
@@ -1664,13 +1636,17 @@ replay:
q = qdisc_lookup(dev, tcm->tcm_handle);
if (!q)
goto create_n_graft;
+ if (q->parent != tcm->tcm_parent) {
+ NL_SET_ERR_MSG(extack, "Cannot move an existing qdisc to a different parent");
+ return -EINVAL;
+ }
if (n->nlmsg_flags & NLM_F_EXCL) {
NL_SET_ERR_MSG(extack, "Exclusivity flag on, cannot override");
return -EEXIST;
}
if (tca[TCA_KIND] &&
nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
if (q->flags & TCQ_F_INGRESS) {
@@ -1746,12 +1722,12 @@ replay:
return -EEXIST;
}
if (tca[TCA_KIND] && nla_strcmp(tca[TCA_KIND], q->ops->id)) {
- NL_SET_ERR_MSG(extack, "Invalid qdisc name");
+ NL_SET_ERR_MSG(extack, "Invalid qdisc name: must match existing qdisc");
return -EINVAL;
}
err = qdisc_change(q, tca, extack);
if (err == 0)
- qdisc_notify(net, skb, n, clid, NULL, q, extack);
+ qdisc_notify(sock_net(skb->sk), skb, n, clid, NULL, q, extack);
return err;
create_n_graft:
@@ -1783,11 +1759,8 @@ create_n_graft2:
tcm->tcm_parent, tcm->tcm_handle,
tca, &err, extack);
}
- if (q == NULL) {
- if (err == -EAGAIN)
- goto replay;
+ if (!q)
return err;
- }
graft:
err = qdisc_graft(dev, p, skb, n, clid, q, NULL, extack);
@@ -1800,6 +1773,58 @@ graft:
return 0;
}
+static void request_qdisc_module(struct nlattr *kind)
+{
+ struct Qdisc_ops *ops;
+ char name[IFNAMSIZ];
+
+ if (!kind)
+ return;
+
+ ops = qdisc_lookup_ops(kind);
+ if (ops) {
+ bpf_module_put(ops, ops->owner);
+ return;
+ }
+
+ if (nla_strscpy(name, kind, IFNAMSIZ) >= 0) {
+ rtnl_unlock();
+ request_module(NET_SCH_ALIAS_PREFIX "%s", name);
+ rtnl_lock();
+ }
+}
+
+/*
+ * Create/change qdisc.
+ */
+static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tca[TCA_MAX + 1];
+ struct net_device *dev;
+ struct tcmsg *tcm;
+ int err;
+
+ err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
+ rtm_tca_policy, extack);
+ if (err < 0)
+ return err;
+
+ request_qdisc_module(tca[TCA_KIND]);
+
+ tcm = nlmsg_data(n);
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ netdev_lock_ops(dev);
+ err = __tc_modify_qdisc(skb, n, extack, dev, tca, tcm);
+ netdev_unlock_ops(dev);
+
+ return err;
+}
+
static int tc_dump_qdisc_root(struct Qdisc *root, struct sk_buff *skb,
struct netlink_callback *cb,
int *q_idx_p, int s_q_idx, bool recur,
@@ -1884,17 +1909,23 @@ static int tc_dump_qdisc(struct sk_buff *skb, struct netlink_callback *cb)
s_q_idx = 0;
q_idx = 0;
+ netdev_lock_ops(dev);
if (tc_dump_qdisc_root(rtnl_dereference(dev->qdisc),
skb, cb, &q_idx, s_q_idx,
- true, tca[TCA_DUMP_INVISIBLE]) < 0)
+ true, tca[TCA_DUMP_INVISIBLE]) < 0) {
+ netdev_unlock_ops(dev);
goto done;
+ }
dev_queue = dev_ingress_queue(dev);
if (dev_queue &&
tc_dump_qdisc_root(rtnl_dereference(dev_queue->qdisc_sleeping),
skb, cb, &q_idx, s_q_idx, false,
- tca[TCA_DUMP_INVISIBLE]) < 0)
+ tca[TCA_DUMP_INVISIBLE]) < 0) {
+ netdev_unlock_ops(dev);
goto done;
+ }
+ netdev_unlock_ops(dev);
cont:
idx++;
@@ -2131,15 +2162,15 @@ static void tc_bind_tclass(struct Qdisc *q, u32 portid, u32 clid,
#endif
-static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
- struct netlink_ext_ack *extack)
+static int __tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack,
+ struct net_device *dev,
+ struct nlattr *tca[TCA_MAX + 1],
+ struct tcmsg *tcm)
{
struct net *net = sock_net(skb->sk);
- struct tcmsg *tcm = nlmsg_data(n);
- struct nlattr *tca[TCA_MAX + 1];
- struct net_device *dev;
- struct Qdisc *q = NULL;
const struct Qdisc_class_ops *cops;
+ struct Qdisc *q = NULL;
unsigned long cl = 0;
unsigned long new_cl;
u32 portid;
@@ -2147,15 +2178,6 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
u32 qid;
int err;
- err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
- rtm_tca_policy, extack);
- if (err < 0)
- return err;
-
- dev = __dev_get_by_index(net, tcm->tcm_ifindex);
- if (!dev)
- return -ENODEV;
-
/*
parent == TC_H_UNSPEC - unspecified parent.
parent == TC_H_ROOT - class is root, which has no parent.
@@ -2250,6 +2272,12 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
return -EOPNOTSUPP;
}
+ /* Prevent creation of traffic classes with classid TC_H_ROOT */
+ if (clid == TC_H_ROOT) {
+ NL_SET_ERR_MSG(extack, "Cannot create traffic class with classid TC_H_ROOT");
+ return -EINVAL;
+ }
+
new_cl = cl;
err = -EOPNOTSUPP;
if (cops->change)
@@ -2264,6 +2292,31 @@ out:
return err;
}
+static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct tcmsg *tcm = nlmsg_data(n);
+ struct nlattr *tca[TCA_MAX + 1];
+ struct net_device *dev;
+ int err;
+
+ err = nlmsg_parse_deprecated(n, sizeof(*tcm), tca, TCA_MAX,
+ rtm_tca_policy, extack);
+ if (err < 0)
+ return err;
+
+ dev = __dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return -ENODEV;
+
+ netdev_lock_ops(dev);
+ err = __tc_ctl_tclass(skb, n, extack, dev, tca, tcm);
+ netdev_unlock_ops(dev);
+
+ return err;
+}
+
struct qdisc_dump_args {
struct qdisc_walker w;
struct sk_buff *skb;
@@ -2340,20 +2393,12 @@ static int tc_dump_tclass_root(struct Qdisc *root, struct sk_buff *skb,
return 0;
}
-static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+static int __tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb,
+ struct tcmsg *tcm, struct net_device *dev)
{
- struct tcmsg *tcm = nlmsg_data(cb->nlh);
- struct net *net = sock_net(skb->sk);
struct netdev_queue *dev_queue;
- struct net_device *dev;
int t, s_t;
- if (nlmsg_len(cb->nlh) < sizeof(*tcm))
- return 0;
- dev = dev_get_by_index(net, tcm->tcm_ifindex);
- if (!dev)
- return 0;
-
s_t = cb->args[0];
t = 0;
@@ -2370,10 +2415,32 @@ static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
done:
cb->args[0] = t;
- dev_put(dev);
return skb->len;
}
+static int tc_dump_tclass(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct tcmsg *tcm = nlmsg_data(cb->nlh);
+ struct net *net = sock_net(skb->sk);
+ struct net_device *dev;
+ int err;
+
+ if (nlmsg_len(cb->nlh) < sizeof(*tcm))
+ return 0;
+
+ dev = dev_get_by_index(net, tcm->tcm_ifindex);
+ if (!dev)
+ return 0;
+
+ netdev_lock_ops(dev);
+ err = __tc_dump_tclass(skb, cb, tcm, dev);
+ netdev_unlock_ops(dev);
+
+ dev_put(dev);
+
+ return err;
+}
+
#ifdef CONFIG_PROC_FS
static int psched_show(struct seq_file *seq, void *v)
{
@@ -2420,6 +2487,17 @@ static struct pernet_operations psched_net_ops = {
DEFINE_STATIC_KEY_FALSE(tc_skip_wrapper);
#endif
+static const struct rtnl_msg_handler psched_rtnl_msg_handlers[] __initconst = {
+ {.msgtype = RTM_NEWQDISC, .doit = tc_modify_qdisc},
+ {.msgtype = RTM_DELQDISC, .doit = tc_get_qdisc},
+ {.msgtype = RTM_GETQDISC, .doit = tc_get_qdisc,
+ .dumpit = tc_dump_qdisc},
+ {.msgtype = RTM_NEWTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_DELTCLASS, .doit = tc_ctl_tclass},
+ {.msgtype = RTM_GETTCLASS, .doit = tc_ctl_tclass,
+ .dumpit = tc_dump_tclass},
+};
+
static int __init pktsched_init(void)
{
int err;
@@ -2438,14 +2516,7 @@ static int __init pktsched_init(void)
register_qdisc(&mq_qdisc_ops);
register_qdisc(&noqueue_qdisc_ops);
- rtnl_register(PF_UNSPEC, RTM_NEWQDISC, tc_modify_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELQDISC, tc_get_qdisc, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETQDISC, tc_get_qdisc, tc_dump_qdisc,
- 0);
- rtnl_register(PF_UNSPEC, RTM_NEWTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_DELTCLASS, tc_ctl_tclass, NULL, 0);
- rtnl_register(PF_UNSPEC, RTM_GETTCLASS, tc_ctl_tclass, tc_dump_tclass,
- 0);
+ rtnl_register_many(psched_rtnl_msg_handlers);
tc_wrapper_init();
diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
index f2f9b75008bb..48dd8c88903f 100644
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -484,13 +484,15 @@ static bool cobalt_queue_empty(struct cobalt_vars *vars,
/* Call this with a freshly dequeued packet for possible congestion marking.
- * Returns true as an instruction to drop the packet, false for delivery.
+ * Returns a drop reason as an instruction to drop the packet, or
+ * SKB_NOT_DROPPED_YET for delivery.
*/
-static bool cobalt_should_drop(struct cobalt_vars *vars,
- struct cobalt_params *p,
- ktime_t now,
- struct sk_buff *skb,
- u32 bulk_flows)
+static enum skb_drop_reason cobalt_should_drop(struct cobalt_vars *vars,
+ struct cobalt_params *p,
+ ktime_t now,
+ struct sk_buff *skb,
+ u32 bulk_flows)
{
- bool next_due, over_target, drop = false;
+ enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
+ bool next_due, over_target;
ktime_t schedule;
u64 sojourn;
@@ -533,7 +534,8 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
if (next_due && vars->dropping) {
/* Use ECN mark if possible, otherwise drop */
- drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
+ if (!(vars->ecn_marked = INET_ECN_set_ce(skb)))
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
vars->count++;
if (!vars->count)
@@ -556,16 +558,17 @@ static bool cobalt_should_drop(struct cobalt_vars *vars,
}
/* Simple BLUE implementation. Lack of ECN is deliberate. */
- if (vars->p_drop)
- drop |= (get_random_u32() < vars->p_drop);
+ if (vars->p_drop && reason == SKB_NOT_DROPPED_YET &&
+ get_random_u32() < vars->p_drop)
+ reason = SKB_DROP_REASON_CAKE_FLOOD;
/* Overload the drop_next field as an activity timeout */
if (!vars->count)
vars->drop_next = ktime_add_ns(now, p->interval);
- else if (ktime_to_ns(schedule) > 0 && !drop)
+ else if (ktime_to_ns(schedule) > 0 && reason == SKB_NOT_DROPPED_YET)
vars->drop_next = now;
- return drop;
+ return reason;
}
static bool cake_update_flowkeys(struct flow_keys *keys,
@@ -627,6 +630,63 @@ static bool cake_ddst(int flow_mode)
return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
}
+static void cake_dec_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count))
+ q->hosts[flow->srchost].srchost_bulk_flow_count--;
+}
+
+static void cake_inc_srchost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_dsrc(flow_mode) &&
+ q->hosts[flow->srchost].srchost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->srchost].srchost_bulk_flow_count++;
+}
+
+static void cake_dec_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count--;
+}
+
+static void cake_inc_dsthost_bulk_flow_count(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ if (likely(cake_ddst(flow_mode) &&
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count < CAKE_QUEUES))
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count++;
+}
+
+static u16 cake_get_flow_quantum(struct cake_tin_data *q,
+ struct cake_flow *flow,
+ int flow_mode)
+{
+ u16 host_load = 1;
+
+ if (cake_dsrc(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->srchost].srchost_bulk_flow_count);
+
+ if (cake_ddst(flow_mode))
+ host_load = max(host_load,
+ q->hosts[flow->dsthost].dsthost_bulk_flow_count);
+
+ /* The get_random_u16() is a way to apply dithering to avoid
+ * accumulating roundoff errors
+ */
+ return (q->flow_quantum * quantum_div[host_load] +
+ get_random_u16()) >> 16;
+}
+
static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
int flow_mode, u16 flow_override, u16 host_override)
{
@@ -773,10 +833,8 @@ skip_hash:
allocate_dst = cake_ddst(flow_mode);
if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {
- if (allocate_src)
- q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;
- if (allocate_dst)
- q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;
+ cake_dec_srchost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
+ cake_dec_dsthost_bulk_flow_count(q, &q->flows[outer_hash + k], flow_mode);
}
found:
/* reserve queue for future packets in same flow */
@@ -801,9 +859,10 @@ found:
q->hosts[outer_hash + k].srchost_tag = srchost_hash;
found_src:
srchost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[srchost_idx].srchost_bulk_flow_count++;
q->flows[reduced_hash].srchost = srchost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_srchost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
if (allocate_dst) {
@@ -824,9 +883,10 @@ found_src:
q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
found_dst:
dsthost_idx = outer_hash + k;
- if (q->flows[reduced_hash].set == CAKE_SET_BULK)
- q->hosts[dsthost_idx].dsthost_bulk_flow_count++;
q->flows[reduced_hash].dsthost = dsthost_idx;
+
+ if (q->flows[reduced_hash].set == CAKE_SET_BULK)
+ cake_inc_dsthost_bulk_flow_count(q, &q->flows[reduced_hash], flow_mode);
}
}
@@ -1525,17 +1585,16 @@ static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
b->backlogs[idx] -= len;
b->tin_backlog -= len;
sch->qstats.backlog -= len;
- qdisc_tree_reduce_backlog(sch, 1, len);
flow->dropped++;
b->tin_dropped++;
- sch->qstats.drops++;
if (q->rate_flags & CAKE_FLAG_INGRESS)
cake_advance_shaper(q, b, skb, now, true);
- __qdisc_drop(skb, to_free);
+ qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
sch->q.qlen--;
+ qdisc_tree_reduce_backlog(sch, 1, len);
cake_heapify(q, 0);
@@ -1839,10 +1898,6 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* flowchain */
if (!flow->set || flow->set == CAKE_SET_DECAYING) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
- u16 host_load = 1;
-
if (!flow->set) {
list_add_tail(&flow->flowchain, &b->new_flows);
} else {
@@ -1852,18 +1907,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
flow->set = CAKE_SET_SPARSE;
b->sparse_flow_count++;
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- flow->deficit = (b->flow_quantum *
- quantum_div[host_load]) >> 16;
+ flow->deficit = cake_get_flow_quantum(b, flow, q->flow_mode);
} else if (flow->set == CAKE_SET_SPARSE_WAIT) {
- struct cake_host *srchost = &b->hosts[flow->srchost];
- struct cake_host *dsthost = &b->hosts[flow->dsthost];
-
/* this flow was empty, accounted as a sparse flow, but actually
* in the bulk rotation.
*/
@@ -1871,12 +1916,8 @@ static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
-
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
}
if (q->buffer_used > q->buffer_max_used)
@@ -1926,20 +1967,19 @@ static void cake_clear_tin(struct Qdisc *sch, u16 tin)
q->cur_tin = tin;
for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
while (!!(skb = cake_dequeue_one(sch)))
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QUEUE_PURGE);
}
static struct sk_buff *cake_dequeue(struct Qdisc *sch)
{
struct cake_sched_data *q = qdisc_priv(sch);
struct cake_tin_data *b = &q->tins[q->cur_tin];
- struct cake_host *srchost, *dsthost;
+ enum skb_drop_reason reason;
ktime_t now = ktime_get();
struct cake_flow *flow;
struct list_head *head;
bool first_flow = true;
struct sk_buff *skb;
- u16 host_load;
u64 delay;
u32 len;
@@ -2039,11 +2079,6 @@ retry:
q->cur_flow = flow - b->flows;
first_flow = false;
- /* triple isolation (modified DRR++) */
- srchost = &b->hosts[flow->srchost];
- dsthost = &b->hosts[flow->dsthost];
- host_load = 1;
-
/* flow isolation (DRR++) */
if (flow->deficit <= 0) {
/* Keep all flows with deficits out of the sparse and decaying
@@ -2055,11 +2090,8 @@ retry:
b->sparse_flow_count--;
b->bulk_flow_count++;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count++;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count++;
+ cake_inc_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_inc_dsthost_bulk_flow_count(b, flow, q->flow_mode);
flow->set = CAKE_SET_BULK;
} else {
@@ -2071,19 +2103,7 @@ retry:
}
}
- if (cake_dsrc(q->flow_mode))
- host_load = max(host_load, srchost->srchost_bulk_flow_count);
-
- if (cake_ddst(q->flow_mode))
- host_load = max(host_load, dsthost->dsthost_bulk_flow_count);
-
- WARN_ON(host_load > CAKE_QUEUES);
-
- /* The get_random_u16() is a way to apply dithering to avoid
- * accumulating roundoff errors
- */
- flow->deficit += (b->flow_quantum * quantum_div[host_load] +
- get_random_u16()) >> 16;
+ flow->deficit += cake_get_flow_quantum(b, flow, q->flow_mode);
list_move_tail(&flow->flowchain, &b->old_flows);
goto retry;
@@ -2107,11 +2127,8 @@ retry:
if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
b->decaying_flow_count++;
} else if (flow->set == CAKE_SET_SPARSE ||
@@ -2129,12 +2146,8 @@ retry:
else if (flow->set == CAKE_SET_BULK) {
b->bulk_flow_count--;
- if (cake_dsrc(q->flow_mode))
- srchost->srchost_bulk_flow_count--;
-
- if (cake_ddst(q->flow_mode))
- dsthost->dsthost_bulk_flow_count--;
-
+ cake_dec_srchost_bulk_flow_count(b, flow, q->flow_mode);
+ cake_dec_dsthost_bulk_flow_count(b, flow, q->flow_mode);
} else
b->decaying_flow_count--;
@@ -2143,12 +2156,12 @@ retry:
goto begin;
}
+ reason = cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
+ (b->bulk_flow_count *
+ !!(q->rate_flags &
+ CAKE_FLAG_INGRESS)));
/* Last packet in queue may be marked, shouldn't be dropped */
- if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
- (b->bulk_flow_count *
- !!(q->rate_flags &
- CAKE_FLAG_INGRESS))) ||
- !flow->head)
+ if (reason == SKB_NOT_DROPPED_YET || !flow->head)
break;
/* drop this packet, get another one */
@@ -2162,7 +2175,7 @@ retry:
b->tin_dropped++;
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_qstats_drop(sch);
- kfree_skb(skb);
+ kfree_skb_reason(skb, reason);
if (q->rate_flags & CAKE_FLAG_INGRESS)
goto retry;
}
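
The dithering comment in cake_get_flow_quantum() refers to 16.16 fixed-point division: quantum_div[] stores reciprocals scaled by 2^16, and adding a uniform 16-bit value before the right shift makes truncation round up with probability equal to the discarded fraction, so per-dequeue rounding errors cancel on average. A self-contained userspace model of the idea (the reciprocal function, constants, and rand() stand-in are illustrative, not CAKE's actual table or RNG):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative reciprocal: quantum_div[n] ~= 2^16 / n. */
static uint32_t quantum_div(uint32_t host_load)
{
	return host_load ? 0x10000 / host_load : 0x10000;
}

static uint16_t dithered_quantum(uint16_t flow_quantum, uint32_t host_load)
{
	uint32_t dither = rand() & 0xffff;	/* stands in for get_random_u16() */

	/* The added 16-bit noise makes the >>16 truncation round up with
	 * the right probability instead of always rounding down.
	 */
	return (flow_quantum * quantum_div(host_load) + dither) >> 16;
}

int main(void)
{
	long sum = 0;

	for (int i = 0; i < 1000000; i++)
		sum += dithered_quantum(1514, 3);
	/* averages ~504.7, i.e. ~1514/3, despite each sample being an int */
	printf("avg quantum: %.1f\n", sum / 1000000.0);
	return 0;
}
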
diff --git a/net/sched/sch_cbs.c b/net/sched/sch_cbs.c
index 939425da1895..8c9a0400c862 100644
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -310,7 +310,7 @@ static void cbs_set_port_rate(struct net_device *dev, struct cbs_sched_data *q)
{
struct ethtool_link_ksettings ecmd;
int speed = SPEED_10;
- int port_rate;
+ s64 port_rate;
int err;
err = __ethtool_get_link_ksettings(dev, &ecmd);
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 91072010923d..59e7bdf5063e 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -123,10 +123,10 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
if (idx == q->tail)
choke_zap_tail_holes(q);
+ --sch->q.qlen;
qdisc_qstats_backlog_dec(sch, skb);
qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
qdisc_drop(skb, sch, to_free);
- --sch->q.qlen;
}
struct choke_skb_cb {
@@ -356,7 +356,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt,
tb[TCA_CHOKE_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_CHOKE_MAX_P], 0);
ctl = nla_data(tb[TCA_CHOKE_PARMS]);
stab = nla_data(tb[TCA_CHOKE_STAB]);
diff --git a/net/sched/sch_codel.c b/net/sched/sch_codel.c
index 3e8d4fe4d91e..c93761040c6e 100644
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -52,7 +52,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -65,10 +65,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
&q->stats, qdisc_pkt_len, codel_get_enqueue_time,
drop_func, dequeue_func);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->stats.drop_count && sch->q.qlen) {
+ if (q->stats.drop_count) {
qdisc_tree_reduce_backlog(sch, q->stats.drop_count, q->stats.drop_len);
q->stats.drop_count = 0;
q->stats.drop_len = 0;
@@ -89,7 +86,8 @@ static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
q = qdisc_priv(sch);
q->drop_overlimit++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ SKB_DROP_REASON_QDISC_OVERLIMIT);
}
static const struct nla_policy codel_policy[TCA_CODEL_MAX + 1] = {
@@ -146,7 +144,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt,
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
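
Switching from __qdisc_dequeue_head(&sch->q) to qdisc_dequeue_internal(sch, true) matters when a packet has been peeked and parked in sch->gso_skb: a plain head dequeue never sees it, so a limit-enforcement loop could miscount or fail to drain. The helper is assumed to look roughly like this (a sketch, not the verbatim header definition):

/* Assumed shape of qdisc_dequeue_internal(): drain any peeked skb
 * first, then fall back to a direct head dequeue or ops->dequeue().
 */
static inline struct sk_buff *qdisc_dequeue_internal(struct Qdisc *sch,
						     bool direct)
{
	struct sk_buff *skb;

	skb = __skb_dequeue(&sch->gso_skb);
	if (skb) {
		sch->q.qlen--;
		return skb;
	}

	return direct ? __qdisc_dequeue_head(&sch->q) : sch->ops->dequeue(sch);
}
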
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index c69b999fae17..9b6d79bd8737 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -35,6 +35,11 @@ struct drr_sched {
struct Qdisc_class_hash clhash;
};
+static bool cl_is_active(struct drr_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
{
struct drr_sched *q = qdisc_priv(sch);
@@ -105,6 +110,7 @@ static int drr_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
return -ENOBUFS;
gnet_stats_basic_sync_init(&cl->bstats);
+ INIT_LIST_HEAD(&cl->alist);
cl->common.classid = classid;
cl->quantum = quantum;
cl->qdisc = qdisc_create_dflt(sch->dev_queue,
@@ -229,7 +235,7 @@ static void drr_qlen_notify(struct Qdisc *csh, unsigned long arg)
{
struct drr_class *cl = (struct drr_class *)arg;
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
}
static int drr_dump_class(struct Qdisc *sch, unsigned long arg,
@@ -336,7 +342,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct drr_sched *q = qdisc_priv(sch);
struct drr_class *cl;
int err = 0;
- bool first;
cl = drr_classify(skb, sch, &err);
if (cl == NULL) {
@@ -346,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -356,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first) {
+ if (!cl_is_active(cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
@@ -390,7 +394,7 @@ static struct sk_buff *drr_dequeue(struct Qdisc *sch)
if (unlikely(skb == NULL))
goto out;
if (cl->qdisc->q.qlen == 0)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
bstats_update(&cl->bstats, skb);
qdisc_bstats_update(sch, skb);
@@ -431,7 +435,7 @@ static void drr_reset_qdisc(struct Qdisc *sch)
for (i = 0; i < q->clhash.hashsize; i++) {
hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
if (cl->qdisc->q.qlen)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
qdisc_reset(cl->qdisc);
}
}
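
The list_del_init()/INIT_LIST_HEAD() conversion makes list membership itself the activity test: after list_del_init() a node is self-linked, so list_empty() on the node doubles as "am I on the active list?", which is what cl_is_active() relies on instead of the child-qlen heuristic that GSO peeking can desync. A minimal userspace model of the idiom (a toy reimplementation of the relevant <linux/list.h> pieces):

#include <assert.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev; n->next = head;
	head->prev->next = n; head->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

int main(void)
{
	struct list_head active, cl;

	INIT_LIST_HEAD(&active);
	INIT_LIST_HEAD(&cl);	/* matches INIT_LIST_HEAD in drr_change_class */
	assert(list_empty(&cl));	/* inactive */
	list_add_tail(&cl, &active);
	assert(!list_empty(&cl));	/* active */
	list_del_init(&cl);
	list_del_init(&cl);	/* idempotent; plain list_del() would corrupt */
	assert(list_empty(&cl));
	return 0;
}
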
diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
index f80bc05d4c5a..037f764822b9 100644
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -74,6 +74,11 @@ static const struct nla_policy ets_class_policy[TCA_ETS_MAX + 1] = {
[TCA_ETS_QUANTA_BAND] = { .type = NLA_U32 },
};
+static bool cl_is_active(struct ets_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static int ets_quantum_parse(struct Qdisc *sch, const struct nlattr *attr,
unsigned int *quantum,
struct netlink_ext_ack *extack)
@@ -91,6 +96,8 @@ ets_class_from_arg(struct Qdisc *sch, unsigned long arg)
{
struct ets_sched *q = qdisc_priv(sch);
+ if (arg == 0 || arg > q->nbands)
+ return NULL;
return &q->classes[arg - 1];
}
@@ -291,7 +298,7 @@ static void ets_class_qlen_notify(struct Qdisc *sch, unsigned long arg)
* to remove them.
*/
if (!ets_class_is_strict(q, cl) && sch->q.qlen)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
}
static int ets_class_dump(struct Qdisc *sch, unsigned long arg,
@@ -414,7 +421,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct ets_sched *q = qdisc_priv(sch);
struct ets_class *cl;
int err = 0;
- bool first;
cl = ets_classify(skb, sch, &err);
if (!cl) {
@@ -424,7 +430,6 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) {
@@ -434,7 +439,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return err;
}
- if (first && !ets_class_is_strict(q, cl)) {
+ if (!cl_is_active(cl) && !ets_class_is_strict(q, cl)) {
list_add_tail(&cl->alist, &q->active);
cl->deficit = cl->quantum;
}
@@ -486,7 +491,7 @@ static struct sk_buff *ets_qdisc_dequeue(struct Qdisc *sch)
if (unlikely(!skb))
goto out;
if (cl->qdisc->q.qlen == 0)
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
return ets_qdisc_dequeue_skb(sch, skb);
}
@@ -655,8 +660,8 @@ static int ets_qdisc_change(struct Qdisc *sch, struct nlattr *opt,
}
for (i = q->nbands; i < oldbands; i++) {
if (i >= q->nstrict && q->classes[i].qdisc->q.qlen)
- list_del(&q->classes[i].alist);
- qdisc_tree_flush_backlog(q->classes[i].qdisc);
+ list_del_init(&q->classes[i].alist);
+ qdisc_purge_queue(q->classes[i].qdisc);
}
WRITE_ONCE(q->nstrict, nstrict);
memcpy(q->prio2band, priomap, sizeof(priomap));
@@ -711,7 +716,7 @@ static void ets_qdisc_reset(struct Qdisc *sch)
for (band = q->nstrict; band < q->nbands; band++) {
if (q->classes[band].qdisc->q.qlen)
- list_del(&q->classes[band].alist);
+ list_del_init(&q->classes[band].alist);
}
for (band = 0; band < q->nbands; band++)
qdisc_reset(q->classes[band].qdisc);
diff --git a/net/sched/sch_fifo.c b/net/sched/sch_fifo.c
index b50b2c2cc09b..e6bfd39ff339 100644
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -40,6 +40,9 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
{
unsigned int prev_backlog;
+ if (unlikely(READ_ONCE(sch->limit) == 0))
+ return qdisc_drop(skb, sch, to_free);
+
if (likely(sch->q.qlen < READ_ONCE(sch->limit)))
return qdisc_enqueue_tail(skb, sch);
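
The zero-limit check closes a corner case in the tail-drop path below it: with sch->limit == 0, the "queue full" branch drops the head of an already-empty queue and then enqueues the new packet anyway, leaving one packet on a qdisc configured to hold none. A trace of both paths for limit == 0:

/* limit == 0, qlen == 0 (illustrative trace):
 *
 * old path:
 *   qlen < limit        -> 0 < 0 is false, skip qdisc_enqueue_tail()
 *   drop one from head  -> queue is empty, nothing actually removed
 *   enqueue new skb     -> qlen becomes 1, violating limit == 0
 *
 * new path:
 *   limit == 0          -> qdisc_drop(new skb), qlen stays 0
 */
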
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 19a49af5a9e5..902ff5470607 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -111,6 +111,7 @@ struct fq_perband_flows {
struct fq_sched_data {
/* Read mostly cache line */
+ u64 offload_horizon;
u32 quantum;
u32 initial_quantum;
u32 flow_refill_delay;
@@ -299,7 +300,7 @@ static void fq_gc(struct fq_sched_data *q,
}
/* Fast path can be used if :
- * 1) Packet tstamp is in the past.
+ * 1) Packet tstamp is in the past, or within the pacing offload horizon.
* 2) FQ qlen == 0 OR
* (no flow is currently eligible for transmit,
* AND fast path queue has less than 8 packets)
@@ -314,7 +315,7 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
const struct fq_sched_data *q = qdisc_priv(sch);
const struct sock *sk;
- if (fq_skb_cb(skb)->time_to_send > now)
+ if (fq_skb_cb(skb)->time_to_send > now + q->offload_horizon)
return false;
if (sch->q.qlen != 0) {
@@ -331,6 +332,12 @@ static bool fq_fastpath_check(const struct Qdisc *sch, struct sk_buff *skb,
*/
if (q->internal.qlen >= 8)
return false;
+
+ /* Ordering invariants fall apart if some delayed flows
+ * are ready but we haven't serviced them, yet.
+ */
+ if (q->time_next_delayed_flow <= now + q->offload_horizon)
+ return false;
}
sk = skb->sk;
@@ -361,8 +368,9 @@ static struct fq_flow *fq_classify(struct Qdisc *sch, struct sk_buff *skb,
* 3) We do not want to rate limit them (eg SYNFLOOD attack),
* especially if the listener set SO_MAX_PACING_RATE
* 4) We pretend they are orphaned
+ * TCP can also associate TIME_WAIT sockets with RST or ACK packets.
*/
- if (!sk || sk_listener(sk)) {
+ if (!sk || sk_listener_or_tw(sk)) {
unsigned long hash = skb_get_hash(skb) & q->orphan_mask;
/* By forcing low order bit to 1, we make sure to not
@@ -529,6 +537,8 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
return unlikely((s64)skb->tstamp > (s64)(now + q->horizon));
}
+#define FQDR(reason) SKB_DROP_REASON_FQ_##reason
+
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
@@ -540,7 +550,8 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
band = fq_prio2band(q->prio2band, skb->priority & TC_PRIO_MAX);
if (unlikely(q->band_pkt_count[band] >= sch->limit)) {
q->stat_band_drops[band]++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(BAND_LIMIT));
}
now = ktime_get_ns();
@@ -550,8 +561,9 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Check if packet timestamp is too far in the future. */
if (fq_packet_beyond_horizon(skb, q, now)) {
if (q->horizon_drop) {
- q->stat_horizon_drops++;
- return qdisc_drop(skb, sch, to_free);
+ q->stat_horizon_drops++;
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(HORIZON_LIMIT));
}
q->stat_horizon_caps++;
skb->tstamp = now + q->horizon;
@@ -564,7 +576,8 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
if (f != &q->internal) {
if (unlikely(f->qlen >= q->flow_plimit)) {
q->stat_flows_plimit++;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free,
+ FQDR(FLOW_LIMIT));
}
if (fq_flow_is_detached(f)) {
@@ -589,21 +602,25 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
return NET_XMIT_SUCCESS;
}
+#undef FQDR
static void fq_check_throttled(struct fq_sched_data *q, u64 now)
{
unsigned long sample;
struct rb_node *p;
- if (q->time_next_delayed_flow > now)
+ if (q->time_next_delayed_flow > now + q->offload_horizon)
return;
/* Update unthrottle latency EWMA.
* This is cheap and can help diagnosing timer/latency problems.
*/
sample = (unsigned long)(now - q->time_next_delayed_flow);
- q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
- q->unthrottle_latency_ns += sample >> 3;
+ if ((long)sample > 0) {
+ q->unthrottle_latency_ns -= q->unthrottle_latency_ns >> 3;
+ q->unthrottle_latency_ns += sample >> 3;
+ }
+ now += q->offload_horizon;
q->time_next_delayed_flow = ~0ULL;
while ((p = rb_first(&q->delayed)) != NULL) {
@@ -687,7 +704,7 @@ begin:
u64 time_next_packet = max_t(u64, fq_skb_cb(skb)->time_to_send,
f->time_next_packet);
- if (now < time_next_packet) {
+ if (now + q->offload_horizon < time_next_packet) {
head->first = f->next;
f->time_next_packet = time_next_packet;
fq_flow_set_throttled(q, f);
@@ -925,6 +942,7 @@ static const struct nla_policy fq_policy[TCA_FQ_MAX + 1] = {
[TCA_FQ_HORIZON_DROP] = { .type = NLA_U8 },
[TCA_FQ_PRIOMAP] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_prio_qopt)),
[TCA_FQ_WEIGHTS] = NLA_POLICY_EXACT_LEN(FQ_BANDS * sizeof(s32)),
+ [TCA_FQ_OFFLOAD_HORIZON] = { .type = NLA_U32 },
};
/* compress a u8 array with all elems <= 3 to an array of 2-bit fields */
@@ -1100,6 +1118,17 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
WRITE_ONCE(q->horizon_drop,
nla_get_u8(tb[TCA_FQ_HORIZON_DROP]));
+ if (tb[TCA_FQ_OFFLOAD_HORIZON]) {
+ u64 offload_horizon = (u64)NSEC_PER_USEC *
+ nla_get_u32(tb[TCA_FQ_OFFLOAD_HORIZON]);
+
+ if (offload_horizon <= qdisc_dev(sch)->max_pacing_offload_horizon) {
+ WRITE_ONCE(q->offload_horizon, offload_horizon);
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "invalid offload_horizon");
+ err = -EINVAL;
+ }
+ }
if (!err) {
sch_tree_unlock(sch);
@@ -1107,7 +1136,7 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
}
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = fq_dequeue(sch);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
if (!skb)
break;
@@ -1183,6 +1212,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
.bands = FQ_BANDS,
};
struct nlattr *opts;
+ u64 offload_horizon;
u64 ce_threshold;
s32 weights[3];
u64 horizon;
@@ -1199,6 +1229,9 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
horizon = READ_ONCE(q->horizon);
do_div(horizon, NSEC_PER_USEC);
+ offload_horizon = READ_ONCE(q->offload_horizon);
+ do_div(offload_horizon, NSEC_PER_USEC);
+
if (nla_put_u32(skb, TCA_FQ_PLIMIT,
READ_ONCE(sch->limit)) ||
nla_put_u32(skb, TCA_FQ_FLOW_PLIMIT,
@@ -1224,6 +1257,7 @@ static int fq_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_put_u32(skb, TCA_FQ_TIMER_SLACK,
READ_ONCE(q->timer_slack)) ||
nla_put_u32(skb, TCA_FQ_HORIZON, (u32)horizon) ||
+ nla_put_u32(skb, TCA_FQ_OFFLOAD_HORIZON, (u32)offload_horizon) ||
nla_put_u8(skb, TCA_FQ_HORIZON_DROP,
READ_ONCE(q->horizon_drop)))
goto nla_put_failure;
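
Every software-throttle comparison in fq now allows q->offload_horizon of slack: a packet whose launch time falls within that window can be released immediately, on the assumption that the NIC's hardware pacing will hold it until its timestamp fires. The attribute is rejected unless it fits within the device's advertised max_pacing_offload_horizon. The eligibility test reduces to a one-liner (sketch):

#include <stdbool.h>
#include <stdint.h>

/* With hardware pacing offload, a packet may leave the qdisc up to
 * offload_horizon nanoseconds before its scheduled launch time.
 */
static bool eligible_for_tx(uint64_t time_to_send, uint64_t now,
			    uint64_t offload_horizon)
{
	return time_to_send <= now + offload_horizon;
}
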
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index 4f908c11ba95..2a0f3a513bfa 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -168,6 +168,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
skb = dequeue_head(flow);
len += qdisc_pkt_len(skb);
mem += get_codel_cb(skb)->mem_usage;
+ tcf_set_drop_reason(skb, SKB_DROP_REASON_QDISC_OVERLIMIT);
__qdisc_drop(skb, to_free);
} while (++i < max_packets && len < threshold);
@@ -274,7 +275,7 @@ static void drop_func(struct sk_buff *skb, void *ctx)
{
struct Qdisc *sch = ctx;
- kfree_skb(skb);
+ kfree_skb_reason(skb, SKB_DROP_REASON_QDISC_CONGESTED);
qdisc_qstats_drop(sch);
}
@@ -314,10 +315,8 @@ begin:
}
qdisc_bstats_update(sch, skb);
flow->deficit -= qdisc_pkt_len(skb);
- /* We cant call qdisc_tree_reduce_backlog() if our qlen is 0,
- * or HTB crashes. Defer it for next round.
- */
- if (q->cstats.drop_count && sch->q.qlen) {
+
+ if (q->cstats.drop_count) {
qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
q->cstats.drop_len);
q->cstats.drop_count = 0;
@@ -442,7 +441,7 @@ static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
while (sch->q.qlen > sch->limit ||
q->memory_usage > q->memory_limit) {
- struct sk_buff *skb = fq_codel_dequeue(sch);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
q->cstats.drop_len += qdisc_pkt_len(skb);
rtnl_kfree_skbs(skb, skb);
diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
index c38f33ff80bd..b0e34daf1f75 100644
--- a/net/sched/sch_fq_pie.c
+++ b/net/sched/sch_fq_pie.c
@@ -130,6 +130,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow,
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct fq_pie_sched_data *q = qdisc_priv(sch);
struct fq_pie_flow *sel_flow;
int ret;
@@ -161,6 +162,8 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->overmemory++;
}
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
+
if (!pie_drop_early(sch, &q->p_params, &sel_flow->vars,
sel_flow->backlog, skb->len)) {
enqueue = true;
@@ -198,8 +201,7 @@ static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
sel_flow->vars.accu_prob = 0;
- __qdisc_drop(skb, to_free);
- qdisc_qstats_drop(sch);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
}
@@ -364,7 +366,7 @@ static int fq_pie_change(struct Qdisc *sch, struct nlattr *opt,
/* Drop excess packets if new limit is lower */
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = fq_pie_qdisc_dequeue(sch);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
len_dropped += qdisc_pkt_len(skb);
num_dropped += 1;
@@ -382,7 +384,7 @@ flow_error:
static void fq_pie_timer(struct timer_list *t)
{
- struct fq_pie_sched_data *q = from_timer(q, t, adapt_timer);
+ struct fq_pie_sched_data *q = timer_container_of(q, t, adapt_timer);
unsigned long next, tupdate;
struct Qdisc *sch = q->sch;
spinlock_t *root_lock; /* to lock qdisc for probability calculations */
@@ -553,7 +555,7 @@ static void fq_pie_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
q->p_params.tupdate = 0;
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
kvfree(q->flows);
}
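
The staged drop-reason pattern used here (and in pie, red, and sfb below) keeps a single drop label while still reporting the right cause: the reason variable records which class of check the packet last passed. A sketch of the control flow, with hypothetical over_hard_limit()/aqm_early_drop()/do_enqueue() helpers:

static int enqueue(struct sk_buff *skb, struct Qdisc *sch,
		   struct sk_buff **to_free)
{
	enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;

	if (over_hard_limit(sch))
		goto drop;		/* reported as OVERLIMIT */

	reason = SKB_DROP_REASON_QDISC_CONGESTED;
	if (aqm_early_drop(sch, skb))
		goto drop;		/* reported as CONGESTED */

	return do_enqueue(skb, sch);
drop:
	return qdisc_drop_reason(skb, sch, to_free, reason);
}
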
diff --git a/net/sched/sch_frag.c b/net/sched/sch_frag.c
index ce63414185fd..d1d87dce7f3f 100644
--- a/net/sched/sch_frag.c
+++ b/net/sched/sch_frag.c
@@ -16,14 +16,18 @@ struct sch_frag_data {
unsigned int l2_len;
u8 l2_data[VLAN_ETH_HLEN];
int (*xmit)(struct sk_buff *skb);
+ local_lock_t bh_lock;
};
-static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage);
+static DEFINE_PER_CPU(struct sch_frag_data, sch_frag_data_storage) = {
+ .bh_lock = INIT_LOCAL_LOCK(bh_lock),
+};
static int sch_frag_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct sch_frag_data *data = this_cpu_ptr(&sch_frag_data_storage);
+ lockdep_assert_held(&data->bh_lock);
if (skb_cow_head(skb, data->l2_len) < 0) {
kfree_skb(skb);
return -ENOMEM;
@@ -95,6 +99,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
struct rtable sch_frag_rt = { 0 };
unsigned long orig_dst;
+ local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
sch_frag_prepare_frag(skb, xmit);
dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
DST_OBSOLETE_NONE, DST_NOCOUNT);
@@ -105,11 +110,13 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
IPCB(skb)->frag_max_size = mru;
ret = ip_do_fragment(net, skb->sk, skb, sch_frag_xmit);
+ local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
refdst_drop(orig_dst);
} else if (skb_protocol(skb, true) == htons(ETH_P_IPV6)) {
unsigned long orig_dst;
struct rt6_info sch_frag_rt;
+ local_lock_nested_bh(&sch_frag_data_storage.bh_lock);
sch_frag_prepare_frag(skb, xmit);
memset(&sch_frag_rt, 0, sizeof(sch_frag_rt));
dst_init(&sch_frag_rt.dst, &sch_frag_dst_ops, NULL,
@@ -122,6 +129,7 @@ static int sch_fragment(struct net *net, struct sk_buff *skb,
ret = ipv6_stub->ipv6_fragment(net, skb->sk, skb,
sch_frag_xmit);
+ local_unlock_nested_bh(&sch_frag_data_storage.bh_lock);
refdst_drop(orig_dst);
} else {
net_warn_ratelimited("Fail frag %s: eth=%x, MRU=%d, MTU=%d\n",
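
The local_lock turns the per-CPU fragmentation scratch area into something safe on PREEMPT_RT, where softirq code is preemptible and plain BH disabling no longer serializes same-CPU users. The general pattern, sketched with an illustrative structure (the field names here are not the ones in sch_frag.c):

struct pcpu_scratch {
	int in_use;
	local_lock_t bh_lock;
};

static DEFINE_PER_CPU(struct pcpu_scratch, pcpu_scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

static void with_scratch(void)
{
	local_lock_nested_bh(&pcpu_scratch.bh_lock);
	this_cpu_ptr(&pcpu_scratch)->in_use = 1;
	/* ... use the per-CPU buffer ... */
	this_cpu_ptr(&pcpu_scratch)->in_use = 0;
	local_unlock_nested_bh(&pcpu_scratch.bh_lock);
}
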
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 38ec18f73de4..16afb834fe4a 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -24,6 +24,7 @@
#include <linux/if_vlan.h>
#include <linux/skb_array.h>
#include <linux/if_macvlan.h>
+#include <linux/bpf.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/dst.h>
@@ -495,7 +496,7 @@ EXPORT_SYMBOL(netif_tx_unlock);
static void dev_watchdog(struct timer_list *t)
{
- struct net_device *dev = from_timer(dev, t, watchdog_timer);
+ struct net_device *dev = timer_container_of(dev, t, watchdog_timer);
bool release = true;
spin_lock(&dev->tx_global_lock);
@@ -551,28 +552,23 @@ static void dev_watchdog(struct timer_list *t)
netdev_put(dev, &dev->watchdog_dev_tracker);
}
-void __netdev_watchdog_up(struct net_device *dev)
+void netdev_watchdog_up(struct net_device *dev)
{
- if (dev->netdev_ops->ndo_tx_timeout) {
- if (dev->watchdog_timeo <= 0)
- dev->watchdog_timeo = 5*HZ;
- if (!mod_timer(&dev->watchdog_timer,
- round_jiffies(jiffies + dev->watchdog_timeo)))
- netdev_hold(dev, &dev->watchdog_dev_tracker,
- GFP_ATOMIC);
- }
-}
-EXPORT_SYMBOL_GPL(__netdev_watchdog_up);
-
-static void dev_watchdog_up(struct net_device *dev)
-{
- __netdev_watchdog_up(dev);
+ if (!dev->netdev_ops->ndo_tx_timeout)
+ return;
+ if (dev->watchdog_timeo <= 0)
+ dev->watchdog_timeo = 5*HZ;
+ if (!mod_timer(&dev->watchdog_timer,
+ round_jiffies(jiffies + dev->watchdog_timeo)))
+ netdev_hold(dev, &dev->watchdog_dev_tracker,
+ GFP_ATOMIC);
}
+EXPORT_SYMBOL_GPL(netdev_watchdog_up);
-static void dev_watchdog_down(struct net_device *dev)
+static void netdev_watchdog_down(struct net_device *dev)
{
netif_tx_lock_bh(dev);
- if (del_timer(&dev->watchdog_timer))
+ if (timer_delete(&dev->watchdog_timer))
netdev_put(dev, &dev->watchdog_dev_tracker);
netif_tx_unlock_bh(dev);
}
@@ -591,7 +587,7 @@ void netif_carrier_on(struct net_device *dev)
atomic_inc(&dev->carrier_up_count);
linkwatch_fire_event(dev);
if (netif_running(dev))
- __netdev_watchdog_up(dev);
+ netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(netif_carrier_on);
@@ -911,8 +907,8 @@ static int pfifo_fast_change_tx_queue_len(struct Qdisc *sch,
bands[prio] = q;
}
- return skb_array_resize_multiple(bands, PFIFO_FAST_BANDS, new_len,
- GFP_KERNEL);
+ return skb_array_resize_multiple_bh(bands, PFIFO_FAST_BANDS, new_len,
+ GFP_KERNEL);
}
struct Qdisc_ops pfifo_fast_ops __read_mostly = {
@@ -1006,14 +1002,14 @@ struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
{
struct Qdisc *sch;
- if (!try_module_get(ops->owner)) {
+ if (!bpf_try_module_get(ops, ops->owner)) {
NL_SET_ERR_MSG(extack, "Failed to increase module reference counter");
return NULL;
}
sch = qdisc_alloc(dev_queue, ops, extack);
if (IS_ERR(sch)) {
- module_put(ops->owner);
+ bpf_module_put(ops, ops->owner);
return NULL;
}
sch->parent = parentid;
@@ -1083,7 +1079,7 @@ static void __qdisc_destroy(struct Qdisc *qdisc)
ops->destroy(qdisc);
lockdep_unregister_key(&qdisc->root_lock_key);
- module_put(ops->owner);
+ bpf_module_put(ops, ops->owner);
netdev_put(dev, &qdisc->dev_tracker);
trace_qdisc_destroy(qdisc);
@@ -1267,7 +1263,7 @@ void dev_activate(struct net_device *dev)
if (need_watchdog) {
netif_trans_update(dev);
- dev_watchdog_up(dev);
+ netdev_watchdog_up(dev);
}
}
EXPORT_SYMBOL(dev_activate);
@@ -1282,15 +1278,17 @@ static void qdisc_deactivate(struct Qdisc *qdisc)
static void dev_deactivate_queue(struct net_device *dev,
struct netdev_queue *dev_queue,
- void *_qdisc_default)
+ void *_sync_needed)
{
- struct Qdisc *qdisc_default = _qdisc_default;
+ bool *sync_needed = _sync_needed;
struct Qdisc *qdisc;
qdisc = rtnl_dereference(dev_queue->qdisc);
if (qdisc) {
+ if (qdisc->enqueue)
+ *sync_needed = true;
qdisc_deactivate(qdisc);
- rcu_assign_pointer(dev_queue->qdisc, qdisc_default);
+ rcu_assign_pointer(dev_queue->qdisc, &noop_qdisc);
}
}
@@ -1357,24 +1355,22 @@ static bool some_qdisc_is_busy(struct net_device *dev)
*/
void dev_deactivate_many(struct list_head *head)
{
+ bool sync_needed = false;
struct net_device *dev;
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_deactivate_queue,
- &noop_qdisc);
+ &sync_needed);
if (dev_ingress_queue(dev))
dev_deactivate_queue(dev, dev_ingress_queue(dev),
- &noop_qdisc);
+ &sync_needed);
- dev_watchdog_down(dev);
+ netdev_watchdog_down(dev);
}
- /* Wait for outstanding qdisc-less dev_queue_xmit calls or
- * outstanding qdisc enqueuing calls.
- * This is avoided if all devices are in dismantle phase :
- * Caller will call synchronize_net() for us
- */
- synchronize_net();
+ /* Wait for outstanding qdisc enqueuing calls. */
+ if (sync_needed)
+ synchronize_net();
list_for_each_entry(dev, head, close_list) {
netdev_for_each_tx_queue(dev, dev_reset_queue, NULL);
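
The unconditional synchronize_net() is now skipped for devices whose queues only held qdiscs without an ->enqueue method (noop/noqueue): such qdiscs cannot have enqueue calls in flight, so there is no grace period to wait for. Condensed shape of the change (sketch):

bool sync_needed = false;

list_for_each_entry(dev, head, close_list)
	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &sync_needed);

/* dev_deactivate_queue() sets the flag iff qdisc->enqueue != NULL */
if (sync_needed)
	synchronize_net();
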
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 79ba9dc70254..532fde548b88 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -251,10 +251,10 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
q->stats.pdrop++;
drop:
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_OVERLIMIT);
congestion_drop:
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, SKB_DROP_REASON_QDISC_CONGESTED);
return NET_XMIT_CN;
}
@@ -668,7 +668,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
return -EINVAL;
}
- max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_GRED_MAX_P], 0);
ctl = nla_data(tb[TCA_GRED_PARMS]);
stab = nla_data(tb[TCA_GRED_STAB]);
@@ -913,7 +913,8 @@ static void gred_destroy(struct Qdisc *sch)
for (i = 0; i < table->DPs; i++)
gred_destroy_vq(table->tab[i]);
- gred_offload(sch, TC_GRED_DESTROY);
+ if (table->opt)
+ gred_offload(sch, TC_GRED_DESTROY);
kfree(table->opt);
}
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index c287bf8423b4..5a7745170e84 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -175,6 +175,11 @@ struct hfsc_sched {
#define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
+static bool cl_in_el_or_vttree(struct hfsc_class *cl)
+{
+ return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
+ ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
+}
/*
* eligible tree holds backlogged classes being sorted by their eligible times.
@@ -203,7 +208,10 @@ eltree_insert(struct hfsc_class *cl)
static inline void
eltree_remove(struct hfsc_class *cl)
{
- rb_erase(&cl->el_node, &cl->sched->eligible);
+ if (!RB_EMPTY_NODE(&cl->el_node)) {
+ rb_erase(&cl->el_node, &cl->sched->eligible);
+ RB_CLEAR_NODE(&cl->el_node);
+ }
}
static inline void
@@ -958,6 +966,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl != NULL) {
int old_flags;
+ int len = 0;
if (parentid) {
if (cl->cl_parent &&
@@ -988,9 +997,13 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (usc != NULL)
hfsc_change_usc(cl, usc, cur_time);
+ if (cl->qdisc->q.qlen != 0)
+ len = qdisc_peek_len(cl->qdisc);
+ /* Check queue length again since some qdisc implementations
+ * (e.g., netem/codel) might empty the queue during the peek
+ * operation.
+ */
if (cl->qdisc->q.qlen != 0) {
- int len = qdisc_peek_len(cl->qdisc);
-
if (cl->cl_flags & HFSC_RSC) {
if (old_flags & HFSC_RSC)
update_ed(cl, len);
@@ -1032,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (cl == NULL)
return -ENOBUFS;
+ RB_CLEAR_NODE(&cl->el_node);
+
err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
if (err) {
kfree(cl);
@@ -1220,7 +1235,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
/* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
* needs to be called explicitly to remove a class from vttree.
*/
- update_vf(cl, 0, 0);
+ if (cl->cl_nactive)
+ update_vf(cl, 0, 0);
if (cl->cl_flags & HFSC_RSC)
eltree_remove(cl);
}
@@ -1560,7 +1576,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
return err;
}
- if (first) {
+ sch->qstats.backlog += len;
+ sch->q.qlen++;
+
+ if (first && !cl_in_el_or_vttree(cl)) {
if (cl->cl_flags & HFSC_RSC)
init_ed(cl, len);
if (cl->cl_flags & HFSC_FSC)
@@ -1575,9 +1594,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
}
- sch->qstats.backlog += len;
- sch->q.qlen++;
-
return NET_XMIT_SUCCESS;
}
@@ -1632,10 +1648,16 @@ hfsc_dequeue(struct Qdisc *sch)
if (cl->qdisc->q.qlen != 0) {
/* update ed */
next_len = qdisc_peek_len(cl->qdisc);
- if (realtime)
- update_ed(cl, next_len);
- else
- update_d(cl, next_len);
+ /* Check queue length again since some qdisc implementations
+ * (e.g., netem/codel) might empty the queue during the peek
+ * operation.
+ */
+ if (cl->qdisc->q.qlen != 0) {
+ if (realtime)
+ update_ed(cl, next_len);
+ else
+ update_d(cl, next_len);
+ }
} else {
/* the class becomes passive */
eltree_remove(cl);
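
Both re-checks in this file guard against qdisc_peek_len() shrinking the child's queue: a child whose ->peek() is implemented via its dequeue path (the netem/codel cases the comments name) may drop packets while being peeked, so q.qlen can legitimately go from nonzero to zero across the call. The defensive pattern, with a hypothetical consume_len() standing in for the update_ed()/update_d() bookkeeping:

static void charge_after_peek(struct hfsc_class *cl)
{
	unsigned int len;

	if (cl->qdisc->q.qlen == 0)
		return;

	len = qdisc_peek_len(cl->qdisc);	/* may run the child's dequeue */

	/* Re-check: netem/codel may have emptied the queue while peeking. */
	if (cl->qdisc->q.qlen != 0)
		consume_len(cl, len);
}
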
diff --git a/net/sched/sch_hhf.c b/net/sched/sch_hhf.c
index 44d9efe1a96a..5aa434b46707 100644
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -564,7 +564,7 @@ static int hhf_change(struct Qdisc *sch, struct nlattr *opt,
qlen = sch->q.qlen;
prev_backlog = sch->qstats.backlog;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = hhf_dequeue(sch);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, false);
rtnl_kfree_skbs(skb, skb);
}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index ff3de37874e4..14bf71f57057 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -348,7 +348,8 @@ static void htb_add_to_wait_tree(struct htb_sched *q,
*/
static inline void htb_next_rb_node(struct rb_node **n)
{
- *n = rb_next(*n);
+ if (*n)
+ *n = rb_next(*n);
}
/**
@@ -609,8 +610,8 @@ static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
*/
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
- WARN_ON(!cl->prio_activity);
-
+ if (!cl->prio_activity)
+ return;
htb_deactivate_prios(q, cl);
cl->prio_activity = 0;
}
@@ -1738,8 +1739,7 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg,
if (cl->parent)
cl->parent->children--;
- if (cl->prio_activity)
- htb_deactivate(q, cl);
+ htb_deactivate(q, cl);
if (cl->cmode != HTB_CAN_SEND)
htb_safe_rb_erase(&cl->pq_node,
@@ -1810,8 +1810,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
NULL));
- rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
- ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+ rate64 = nla_get_u64_default(tb[TCA_HTB_RATE64], 0);
+ ceil64 = nla_get_u64_default(tb[TCA_HTB_CEIL64], 0);
if (!cl) { /* new class */
struct net_device *dev = qdisc_dev(sch);
@@ -1947,8 +1947,7 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
/* turn parent into inner node */
qdisc_purge_queue(parent->leaf.q);
parent_qdisc = parent->leaf.q;
- if (parent->prio_activity)
- htb_deactivate(q, parent);
+ htb_deactivate(q, parent);
/* remove from evt list because of level change */
if (parent->cmode != HTB_CAN_SEND) {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 39382ee1e331..fdd79d3ccd8c 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -17,6 +17,7 @@
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
+#include <linux/prandom.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>
@@ -78,6 +79,8 @@ struct netem_sched_data {
struct sk_buff *t_head;
struct sk_buff *t_tail;
+ u32 t_len;
+
/* optional qdisc for classful handling (NULL at netem init) */
struct Qdisc *qdisc;
@@ -382,6 +385,7 @@ static void tfifo_reset(struct Qdisc *sch)
rtnl_kfree_skbs(q->t_head, q->t_tail);
q->t_head = NULL;
q->t_tail = NULL;
+ q->t_len = 0;
}
static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
@@ -411,6 +415,7 @@ static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
rb_link_node(&nskb->rbnode, parent, p);
rb_insert_color(&nskb->rbnode, &q->t_root);
}
+ q->t_len++;
sch->q.qlen++;
}
@@ -517,7 +522,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1<<get_random_u32_below(8);
}
- if (unlikely(sch->q.qlen >= sch->limit)) {
+ if (unlikely(q->t_len >= sch->limit)) {
/* re-link segs, so that qdisc_drop_all() frees them all */
skb->next = segs;
qdisc_drop_all(skb, sch, to_free);
@@ -701,8 +706,8 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
tfifo_dequeue:
skb = __qdisc_dequeue_head(&sch->q);
if (skb) {
- qdisc_qstats_backlog_dec(sch, skb);
deliver:
+ qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb);
return skb;
}
@@ -718,8 +723,7 @@ deliver:
if (time_to_send <= now && q->slot.slot_next <= now) {
netem_erase_head(q, skb);
- sch->q.qlen--;
- qdisc_qstats_backlog_dec(sch, skb);
+ q->t_len--;
skb->next = NULL;
skb->prev = NULL;
/* skb->dev shares skb->rbnode area,
@@ -745,17 +749,22 @@ deliver:
if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err))
qdisc_qstats_drop(sch);
+ sch->qstats.backlog -= pkt_len;
+ sch->q.qlen--;
qdisc_tree_reduce_backlog(sch, 1, pkt_len);
}
goto tfifo_dequeue;
}
+ sch->q.qlen--;
goto deliver;
}
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
qdisc_watchdog_schedule_ns(&q->watchdog,
@@ -765,8 +774,10 @@ deliver:
if (q->qdisc) {
skb = q->qdisc->ops->dequeue(q->qdisc);
- if (skb)
+ if (skb) {
+ sch->q.qlen--;
goto deliver;
+ }
}
return NULL;
}
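
The new t_len counter separates what netem's own tfifo holds from what the qdisc as a whole holds: with a child qdisc attached, sch->q.qlen must also cover packets parked in the child, while the enqueue limit should apply only to the tfifo. The accounting this aims for, as assumed from the hunks above:

/* Assumed invariants after the change:
 *   q->t_len    == packets in netem's own tfifo
 *   sch->q.qlen == q->t_len + packets held by the optional child qdisc
 */
static bool netem_tfifo_full(const struct netem_sched_data *q,
			     const struct Qdisc *sch)
{
	return q->t_len >= sch->limit;	/* limit applies to the tfifo only */
}
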
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index b3dcb845b327..ad46ee3ed5a9 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -85,6 +85,7 @@ EXPORT_SYMBOL_GPL(pie_drop_early);
static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct pie_sched_data *q = qdisc_priv(sch);
bool enqueue = false;
@@ -93,6 +94,8 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
goto out;
}
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
+
if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,
skb->len)) {
enqueue = true;
@@ -121,7 +124,7 @@ static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
out:
q->stats.dropped++;
q->vars.accu_prob = 0;
- return qdisc_drop(skb, sch, to_free);
+ return qdisc_drop_reason(skb, sch, to_free, reason);
}
static const struct nla_policy pie_policy[TCA_PIE_MAX + 1] = {
@@ -192,7 +195,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt,
/* Drop excess packets if new limit is lower */
qlen = sch->q.qlen;
while (sch->q.qlen > sch->limit) {
- struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);
+ struct sk_buff *skb = qdisc_dequeue_internal(sch, true);
dropped += qdisc_pkt_len(skb);
qdisc_qstats_backlog_dec(sch, skb);
@@ -421,7 +424,7 @@ EXPORT_SYMBOL_GPL(pie_calculate_probability);
static void pie_timer(struct timer_list *t)
{
- struct pie_sched_data *q = from_timer(q, t, adapt_timer);
+ struct pie_sched_data *q = timer_container_of(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock;
@@ -542,7 +545,7 @@ static void pie_destroy(struct Qdisc *sch)
struct pie_sched_data *q = qdisc_priv(sch);
q->params.tupdate = 0;
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
}
static struct Qdisc_ops pie_qdisc_ops __read_mostly = {
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index cc30f7a32f1a..9e2b9a490db2 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -211,7 +211,7 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
for (i = q->bands; i < oldbands; i++)
- qdisc_tree_flush_backlog(q->queues[i]);
+ qdisc_purge_queue(q->queues[i]);
for (i = oldbands; i < q->bands; i++) {
q->queues[i] = queues[i];
diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
index d584c0c25899..bf1282cb22eb 100644
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -202,6 +202,11 @@ struct qfq_sched {
*/
enum update_reason {enqueue, requeue};
+static bool cl_is_active(struct qfq_class *cl)
+{
+ return !list_empty(&cl->alist);
+}
+
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
struct qfq_sched *q = qdisc_priv(sch);
@@ -347,7 +352,7 @@ static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
struct qfq_aggregate *agg = cl->agg;
- list_del(&cl->alist); /* remove from RR queue of the aggregate */
+ list_del_init(&cl->alist); /* remove from RR queue of the aggregate */
if (list_empty(&agg->active)) /* agg is now inactive */
qfq_deactivate_agg(q, agg);
}
@@ -421,10 +426,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (err < 0)
return err;
- if (tb[TCA_QFQ_WEIGHT])
- weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
- else
- weight = 1;
+ weight = nla_get_u32_default(tb[TCA_QFQ_WEIGHT], 1);
if (tb[TCA_QFQ_LMAX]) {
lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
@@ -450,7 +452,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
if (q->wsum + delta_w > QFQ_MAX_WSUM) {
NL_SET_ERR_MSG_FMT_MOD(extack,
- "total weight out of range (%d + %u)\n",
+ "total weight out of range (%d + %u)",
delta_w, q->wsum);
return -EINVAL;
}
@@ -477,6 +479,7 @@ static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
gnet_stats_basic_sync_init(&cl->bstats);
cl->common.classid = classid;
cl->deficit = lmax;
+ INIT_LIST_HEAD(&cl->alist);
cl->qdisc = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
classid, NULL);
@@ -985,7 +988,7 @@ static struct sk_buff *agg_dequeue(struct qfq_aggregate *agg,
cl->deficit -= (int) len;
if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
- list_del(&cl->alist);
+ list_del_init(&cl->alist);
else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
cl->deficit += agg->lmax;
list_move_tail(&cl->alist, &agg->active);
@@ -1217,7 +1220,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct qfq_class *cl;
struct qfq_aggregate *agg;
int err = 0;
- bool first;
cl = qfq_classify(skb, sch, &err);
if (cl == NULL) {
@@ -1239,7 +1241,6 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
- first = !cl->qdisc->q.qlen;
err = qdisc_enqueue(skb, cl->qdisc, to_free);
if (unlikely(err != NET_XMIT_SUCCESS)) {
pr_debug("qfq_enqueue: enqueue failed %d\n", err);
@@ -1255,8 +1256,8 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
++sch->q.qlen;
agg = cl->agg;
- /* if the queue was not empty, then done here */
- if (!first) {
+ /* if the class is active, then done here */
+ if (cl_is_active(cl)) {
if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
list_first_entry(&agg->active, struct qfq_class, alist)
== cl && cl->deficit < len)
@@ -1418,6 +1419,8 @@ static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
struct qfq_sched *q = qdisc_priv(sch);
struct qfq_class *cl = (struct qfq_class *)arg;
+ if (list_empty(&cl->alist))
+ return;
qfq_deactivate_class(q, cl);
}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index b5f096588fae..479c42d11083 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -70,6 +70,7 @@ static int red_use_nodrop(struct red_sched_data *q)
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_CONGESTED;
struct red_sched_data *q = qdisc_priv(sch);
struct Qdisc *child = q->qdisc;
unsigned int len;
@@ -107,6 +108,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
break;
case RED_HARD_MARK:
+ reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
qdisc_qstats_overlimit(sch);
if (red_use_harddrop(q) || !red_use_ecn(q)) {
q->stats.forced_drop++;
@@ -143,7 +145,7 @@ congestion_drop:
if (!skb)
return NET_XMIT_CN | ret;
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
}
@@ -216,7 +218,7 @@ static void red_destroy(struct Qdisc *sch)
tcf_qevent_destroy(&q->qe_mark, sch);
tcf_qevent_destroy(&q->qe_early_drop, sch);
- del_timer_sync(&q->adapt_timer);
+ timer_delete_sync(&q->adapt_timer);
red_offload(sch, false);
qdisc_put(q->qdisc);
}
@@ -248,7 +250,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
tb[TCA_RED_STAB] == NULL)
return -EINVAL;
- max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;
+ max_P = nla_get_u32_default(tb[TCA_RED_MAX_P], 0);
ctl = nla_data(tb[TCA_RED_PARMS]);
stab = nla_data(tb[TCA_RED_STAB]);
@@ -283,7 +285,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
q->userbits = userbits;
q->limit = ctl->limit;
if (child) {
- qdisc_tree_flush_backlog(q->qdisc);
+ qdisc_purge_queue(q->qdisc);
old_child = q->qdisc;
q->qdisc = child;
}
@@ -295,7 +297,7 @@ static int __red_change(struct Qdisc *sch, struct nlattr **tb,
max_P);
red_set_vars(&q->vars);
- del_timer(&q->adapt_timer);
+ timer_delete(&q->adapt_timer);
if (ctl->flags & TC_RED_ADAPTATIVE)
mod_timer(&q->adapt_timer, jiffies + HZ/2);
@@ -319,7 +321,7 @@ unlock_out:
static inline void red_adaptative_timer(struct timer_list *t)
{
- struct red_sched_data *q = from_timer(q, t, adapt_timer);
+ struct red_sched_data *q = timer_container_of(q, t, adapt_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock;
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index b717e15a3a17..d2835f1168e1 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -280,6 +280,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
+ enum skb_drop_reason reason = SKB_DROP_REASON_QDISC_OVERLIMIT;
struct sfb_sched_data *q = qdisc_priv(sch);
unsigned int len = qdisc_pkt_len(skb);
struct Qdisc *child = q->qdisc;
@@ -380,6 +381,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
}
r = get_random_u16() & SFB_MAX_PROB;
+ reason = SKB_DROP_REASON_QDISC_CONGESTED;
if (unlikely(r < p_min)) {
if (unlikely(p_min > SFB_MAX_PROB / 2)) {
@@ -414,7 +416,7 @@ enqueue:
return ret;
drop:
- qdisc_drop(skb, sch, to_free);
+ qdisc_drop_reason(skb, sch, to_free, reason);
return NET_XMIT_CN;
other_drop:
if (ret & __NET_XMIT_BYPASS)
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 3b9245a3c767..96eb2f122973 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -77,12 +77,6 @@
#define SFQ_EMPTY_SLOT 0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024
-/* We use 16 bits to store allot, and want to handle packets up to 64K
- * Scale allot by 8 (1<<3) so that no overflow occurs.
- */
-#define SFQ_ALLOT_SHIFT 3
-#define SFQ_ALLOT_SIZE(X) DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
-
/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;
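
The deleted scaling macros existed only because allot was a 16-bit short (see the next hunk): credit was kept in 8-byte units so that even a 64K packet, DIV_ROUND_UP(65536, 8) = 8192, stayed well under SHRT_MAX. The arithmetic that widening allot to int makes unnecessary:

	/* old: allot = DIV_ROUND_UP(bytes, 1 << SFQ_ALLOT_SHIFT), capped at
	 *      SHRT_MAX * 8 = 32767 * 8 = 262136 bytes of credit
	 * new: allot holds plain bytes; 65536 (a 64K GSO packet) is far
	 *      below INT_MAX, so no scaling or rounding is needed
	 */
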
@@ -104,7 +98,7 @@ struct sfq_slot {
sfq_index next; /* next slot in sfq RR chain */
struct sfq_head dep; /* anchor in dep[] chains */
unsigned short hash; /* hash value (index in ht[]) */
- short allot; /* credit for this slot */
+ int allot; /* credit for this slot */
unsigned int backlog;
struct red_vars vars;
@@ -120,7 +114,6 @@ struct sfq_sched_data {
siphash_key_t perturbation;
u8 cur_depth; /* depth of longest slot */
u8 flags;
- unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
struct tcf_proto __rcu *filter_list;
struct tcf_block *block;
sfq_index *ht; /* Hash table ('divisor' slots) */
@@ -317,7 +310,10 @@ drop:
/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
x = q->tail->next;
slot = &q->slots[x];
- q->tail->next = slot->next;
+ if (slot->next == x)
+ q->tail = NULL; /* no more active slots */
+ else
+ q->tail->next = slot->next;
q->ht[slot->hash] = SFQ_EMPTY_SLOT;
goto drop;
}
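
This drop-path fix covers the last-slot case. Active slots form a circular singly linked list of indices, with q->tail naming the tail so that q->tail->next is the head; when the slot being emptied is the only element it links to itself, and the old relink left q->tail pointing at a slot that had just been detached:

	/* ring of length 1: the slot is its own successor
	 *
	 *   slot->next == x   =>   q->tail = NULL;            (ring empty)
	 *   otherwise         =>   q->tail->next = slot->next;
	 */
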
@@ -456,7 +452,7 @@ enqueue:
*/
q->tail = slot;
/* We could use a bigger initial quantum for new flows */
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
if (++sch->q.qlen <= q->limit)
return NET_XMIT_SUCCESS;
@@ -493,7 +489,7 @@ next_slot:
slot = &q->slots[a];
if (slot->allot <= 0) {
q->tail = slot;
- slot->allot += q->scaled_quantum;
+ slot->allot += q->quantum;
goto next_slot;
}
skb = slot_dequeue_head(slot);
@@ -512,7 +508,7 @@ next_slot:
}
q->tail->next = next_a;
} else {
- slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
+ slot->allot -= qdisc_pkt_len(skb);
}
return skb;
}
@@ -595,7 +591,7 @@ drop:
q->tail->next = x;
}
q->tail = slot;
- slot->allot = q->scaled_quantum;
+ slot->allot = q->quantum;
}
}
sch->q.qlen -= dropped;
@@ -604,7 +600,7 @@ drop:
static void sfq_perturbation(struct timer_list *t)
{
- struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
+ struct sfq_sched_data *q = timer_container_of(q, t, perturb_timer);
struct Qdisc *sch = q->sch;
spinlock_t *root_lock;
siphash_key_t nkey;
@@ -628,7 +624,8 @@ static void sfq_perturbation(struct timer_list *t)
rcu_read_unlock();
}
-static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
+static int sfq_change(struct Qdisc *sch, struct nlattr *opt,
+ struct netlink_ext_ack *extack)
{
struct sfq_sched_data *q = qdisc_priv(sch);
struct tc_sfq_qopt *ctl = nla_data(opt);
@@ -637,6 +634,15 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
struct red_parms *p = NULL;
struct sk_buff *to_free = NULL;
struct sk_buff *tail = NULL;
+ unsigned int maxflows;
+ unsigned int quantum;
+ unsigned int divisor;
+ int perturb_period;
+ u8 headdrop;
+ u8 maxdepth;
+ int limit;
+ u8 flags;
+
if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
return -EINVAL;
@@ -646,13 +652,17 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
(!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
return -EINVAL;
- /* slot->allot is a short, make sure quantum is not too big. */
- if (ctl->quantum) {
- unsigned int scaled = SFQ_ALLOT_SIZE(ctl->quantum);
+ if ((int)ctl->quantum < 0) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid quantum");
+ return -EINVAL;
+ }
- if (scaled <= 0 || scaled > SHRT_MAX)
- return -EINVAL;
+ if (ctl->perturb_period < 0 ||
+ ctl->perturb_period > INT_MAX / HZ) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid perturb period");
+ return -EINVAL;
}
+ perturb_period = ctl->perturb_period * HZ;
if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog, ctl_v1->Scell_log, NULL))
@@ -662,37 +672,62 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
if (!p)
return -ENOMEM;
}
+
sch_tree_lock(sch);
- if (ctl->quantum) {
- q->quantum = ctl->quantum;
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
- }
- WRITE_ONCE(q->perturb_period, ctl->perturb_period * HZ);
+
+ limit = q->limit;
+ divisor = q->divisor;
+ headdrop = q->headdrop;
+ maxdepth = q->maxdepth;
+ maxflows = q->maxflows;
+ quantum = q->quantum;
+ flags = q->flags;
+
+ /* update and validate configuration */
+ if (ctl->quantum)
+ quantum = ctl->quantum;
if (ctl->flows)
- q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
+ maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
if (ctl->divisor) {
- q->divisor = ctl->divisor;
- q->maxflows = min_t(u32, q->maxflows, q->divisor);
+ divisor = ctl->divisor;
+ maxflows = min_t(u32, maxflows, divisor);
}
if (ctl_v1) {
if (ctl_v1->depth)
- q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
+ maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
if (p) {
- swap(q->red_parms, p);
- red_set_parms(q->red_parms,
+ red_set_parms(p,
ctl_v1->qth_min, ctl_v1->qth_max,
ctl_v1->Wlog,
ctl_v1->Plog, ctl_v1->Scell_log,
NULL,
ctl_v1->max_P);
}
- q->flags = ctl_v1->flags;
- q->headdrop = ctl_v1->headdrop;
+ flags = ctl_v1->flags;
+ headdrop = ctl_v1->headdrop;
}
if (ctl->limit) {
- q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
- q->maxflows = min_t(u32, q->maxflows, q->limit);
+ limit = min_t(u32, ctl->limit, maxdepth * maxflows);
+ maxflows = min_t(u32, maxflows, limit);
}
+ if (limit == 1) {
+ sch_tree_unlock(sch);
+ kfree(p);
+ NL_SET_ERR_MSG_MOD(extack, "invalid limit");
+ return -EINVAL;
+ }
+
+ /* commit configuration */
+ q->limit = limit;
+ q->divisor = divisor;
+ q->headdrop = headdrop;
+ q->maxdepth = maxdepth;
+ q->maxflows = maxflows;
+ WRITE_ONCE(q->perturb_period, perturb_period);
+ q->quantum = quantum;
+ q->flags = flags;
+ if (p)
+ swap(q->red_parms, p);
qlen = sch->q.qlen;
while (sch->q.qlen > q->limit) {
@@ -704,7 +739,7 @@ static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
rtnl_kfree_skbs(to_free, tail);
qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);
- del_timer(&q->perturb_timer);
+ timer_delete(&q->perturb_timer);
if (q->perturb_period) {
mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
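
sfq_change() now stages every tunable in locals, validates, and only then commits, so a rejected request leaves the qdisc untouched; an effective limit of 1 is refused outright, and red_set_parms() is applied to the spare parameter block before the swap rather than to the live one. The new perturb_period bound is an overflow guard on the multiplication by HZ:

	/* with HZ = 1000:
	 *   INT_MAX / HZ = 2147483647 / 1000 = 2147483 seconds (~24.8 days)
	 * any larger value would overflow the int result of
	 * ctl->perturb_period * HZ
	 */
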
@@ -730,7 +765,7 @@ static void sfq_destroy(struct Qdisc *sch)
tcf_block_put(q->block);
WRITE_ONCE(q->perturb_period, 0);
- del_timer_sync(&q->perturb_timer);
+ timer_delete_sync(&q->perturb_timer);
sfq_free(q->ht);
sfq_free(q->slots);
kfree(q->red_parms);
@@ -762,12 +797,11 @@ static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
q->maxflows = SFQ_DEFAULT_FLOWS;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
q->perturb_period = 0;
get_random_bytes(&q->perturbation, sizeof(q->perturbation));
if (opt) {
- int err = sfq_change(sch, opt);
+ int err = sfq_change(sch, opt, extack);
if (err)
return err;
}
@@ -878,7 +912,7 @@ static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
if (idx != SFQ_EMPTY_SLOT) {
const struct sfq_slot *slot = &q->slots[idx];
- xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
+ xstats.allot = slot->allot;
qs.qlen = slot->qlen;
qs.backlog = slot->backlog;
}
diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
index 20ff7386b74b..f485f62ab721 100644
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -123,8 +123,6 @@ static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
/* Check to update highest and lowest priorities. */
if (skb_queue_empty(lp_qdisc)) {
if (q->lowest_prio == q->highest_prio) {
- /* The incoming packet is the only packet in queue. */
- BUG_ON(sch->q.qlen != 1);
q->lowest_prio = prio;
q->highest_prio = prio;
} else {
@@ -156,7 +154,6 @@ static struct sk_buff *skbprio_dequeue(struct Qdisc *sch)
/* Update highest priority field. */
if (skb_queue_empty(hpq)) {
if (q->lowest_prio == q->highest_prio) {
- BUG_ON(sch->q.qlen);
q->highest_prio = 0;
q->lowest_prio = SKBPRIO_MAX_PRIORITY - 1;
} else {
diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
index 8623dc0bafc0..2b14c81a87e5 100644
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
stab = rtnl_dereference(q->root->stab);
- oper = rtnl_dereference(q->oper_sched);
+ rcu_read_lock();
+ oper = rcu_dereference(q->oper_sched);
if (oper)
taprio_update_queue_max_sdu(q, oper, stab);
- admin = rtnl_dereference(q->admin_sched);
+ admin = rcu_dereference(q->admin_sched);
if (admin)
taprio_update_queue_max_sdu(q, admin, stab);
+ rcu_read_unlock();
break;
}
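
The notifier now wraps the schedule walk in an RCU read-side critical section. q->oper_sched and q->admin_sched are RCU-managed and are presumably switched from the schedule-advance hrtimer, which does not hold RTNL, so rtnl_dereference() alone could race with a schedule change; the hunk follows the standard read pattern:

	rcu_read_lock();
	oper = rcu_dereference(q->oper_sched);	/* stable until unlock */
	if (oper)
		taprio_update_queue_max_sdu(q, oper, stab);
	rcu_read_unlock();
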
@@ -1828,7 +1830,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
* zero; (2) the 'flags' of a "running" taprio instance cannot be
* changed.
*/
- taprio_flags = tb[TCA_TAPRIO_ATTR_FLAGS] ? nla_get_u32(tb[TCA_TAPRIO_ATTR_FLAGS]) : 0;
+ taprio_flags = nla_get_u32_default(tb[TCA_TAPRIO_ATTR_FLAGS], 0);
/* txtime-assist and full offload are mutually exclusive */
if ((taprio_flags & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST) &&
@@ -1932,8 +1934,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
if (!TXTIME_ASSIST_IS_ENABLED(q->flags) &&
!FULL_OFFLOAD_IS_ENABLED(q->flags) &&
!hrtimer_active(&q->advance_timer)) {
- hrtimer_init(&q->advance_timer, q->clockid, HRTIMER_MODE_ABS);
- q->advance_timer.function = advance_sched;
+ hrtimer_setup(&q->advance_timer, advance_sched, q->clockid, HRTIMER_MODE_ABS);
}
err = taprio_get_start_time(sch, new_admin, &start);
@@ -2056,8 +2057,7 @@ static int taprio_init(struct Qdisc *sch, struct nlattr *opt,
spin_lock_init(&q->current_entry_lock);
- hrtimer_init(&q->advance_timer, CLOCK_TAI, HRTIMER_MODE_ABS);
- q->advance_timer.function = advance_sched;
+ hrtimer_setup(&q->advance_timer, advance_sched, CLOCK_TAI, HRTIMER_MODE_ABS);
q->root = sch;
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index f1d09183ae63..4c977f049670 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -208,7 +208,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
struct tbf_sched_data *q = qdisc_priv(sch);
struct sk_buff *segs, *nskb;
netdev_features_t features = netif_skb_features(skb);
- unsigned int len = 0, prev_len = qdisc_pkt_len(skb);
+ unsigned int len = 0, prev_len = qdisc_pkt_len(skb), seg_len;
int ret, nb;
segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
@@ -219,21 +219,27 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
nb = 0;
skb_list_walk_safe(segs, segs, nskb) {
skb_mark_not_on_list(segs);
- qdisc_skb_cb(segs)->pkt_len = segs->len;
- len += segs->len;
+ seg_len = segs->len;
+ qdisc_skb_cb(segs)->pkt_len = seg_len;
ret = qdisc_enqueue(segs, q->qdisc, to_free);
if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret))
qdisc_qstats_drop(sch);
} else {
nb++;
+ len += seg_len;
}
}
sch->q.qlen += nb;
- if (nb > 1)
+ sch->qstats.backlog += len;
+ if (nb > 0) {
qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
- consume_skb(skb);
- return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
+ consume_skb(skb);
+ return NET_XMIT_SUCCESS;
+ }
+
+ kfree_skb(skb);
+ return NET_XMIT_DROP;
}
static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
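
The reworked tbf_segment() counts only bytes the child actually accepted, charges them to this qdisc's backlog, and distinguishes total failure from partial success. A worked example with assumed numbers: a GSO skb charged as 1 packet / 4542 bytes (prev_len) splits into three 1514-byte segments and the child drops one of them:

	nb  = 2;			/* segments accepted        */
	len = 2 * 1514;			/* = 3028 accepted bytes    */
	sch->q.qlen += 2;
	sch->qstats.backlog += 3028;
	qdisc_tree_reduce_backlog(sch, 1 - 2, 4542 - 3028);
	/* = (-1, 1514): ancestors net one more packet and 1514 fewer
	 * bytes than the original charge */

If every segment is rejected (nb == 0), the original skb is now freed with kfree_skb() and NET_XMIT_DROP returned, rather than consume_skb() pretending it was delivered.
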
@@ -446,7 +452,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
sch_tree_lock(sch);
if (child) {
- qdisc_tree_flush_backlog(q->qdisc);
+ qdisc_purge_queue(q->qdisc);
old = q->qdisc;
q->qdisc = child;
}
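
As in the sch_red.c hunk above, replacing a child now purges the old queue instead of merely flushing its backlog accounting. Roughly, per include/net/sch_generic.h (paraphrased sketch, not a verbatim copy):

	static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
	{
		/* only propagates the counter reduction to ancestors */
		qdisc_tree_reduce_backlog(sch, sch->q.qlen,
					  sch->qstats.backlog);
	}

	static inline void qdisc_purge_queue(struct Qdisc *sch)
	{
		__u32 qlen, backlog;

		qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
		qdisc_reset(sch);	/* also frees the queued skbs */
		qdisc_tree_reduce_backlog(sch, qlen, backlog);
	}

Resetting the old child on the spot presumably keeps skbs parked outside q.qlen proper (such as a requeued gso_skb) from leaving the tree's counters inconsistent.
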