path: root/net/sched/sch_red.c
author     Petr Machata <petrm@mellanox.com>  2020-07-14 20:03:07 +0300
committer  Jakub Kicinski <kuba@kernel.org>   2020-07-17 02:48:34 +0300
commit     55f656cdb851bae32980d83d2494201e79d93b63 (patch)
tree       6666f9802c95b192dc998a860b6c840d0a74a321 /net/sched/sch_red.c
parent     89e35f66d552c98c1cfee4a058de158d7f21796a (diff)
download   linux-55f656cdb851bae32980d83d2494201e79d93b63.tar.xz
net: sched: Do not drop root lock in tcf_qevent_handle()
Mirred currently does not mix well with blocks executed after the qdisc root lock is taken. This includes classification blocks (such as in the PRIO, ETS, and DRR qdiscs) and qevents. The locking caused by the packet mirrored by mirred can cause deadlocks: either when the thread of execution attempts to take the lock a second time, or when two threads end up waiting on each other's locks.

The qevent patchset attempted not to introduce further badness of this sort, and dropped the lock before executing the qevent block. However, this led to too little locking and races between qdisc configuration and packet enqueue in the RED qdisc.

Until the deadlock issues are solved in a way that can be applied across many qdiscs reasonably easily, do for qevents what is done for the classification blocks and just keep holding the root lock.

Signed-off-by: Petr Machata <petrm@mellanox.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
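The function whose signature changes here lives in net/sched/cls_api.c, outside this file's diff. For context, below is a hedged sketch of its post-patch shape, reconstructed from the commit message rather than quoted from the tree; details are approximate. The spinlock_t *root_lock parameter is gone, and the qdisc root lock held by the caller simply stays held across the qevent block's classification.

/* Hedged sketch of tcf_qevent_handle() after this patch; the real body
 * is in net/sched/cls_api.c and handles more return codes than shown.
 * Note that the root_lock parameter is gone from the signature.
 */
struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
				  struct sk_buff *skb,
				  struct sk_buff **to_free, int *ret)
{
	struct tcf_result cl_res;
	struct tcf_proto *fl;

	if (!qe->info.block_index)
		return skb;

	fl = rcu_dereference_bh(qe->filter_chain);

	/* Pre-patch, the root lock was dropped here and re-taken after
	 * classification, which raced qdisc reconfiguration against
	 * enqueue.  Now the caller's root lock is simply held throughout,
	 * as is already the case for the classification blocks of
	 * PRIO/ETS/DRR.
	 */
	switch (tcf_classify(skb, fl, &cl_res, false)) {
	case TC_ACT_SHOT:
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		*ret = __NET_XMIT_BYPASS;
		return NULL;
	default:
		return skb;
	}
}

The trade-off is deliberate: holding the lock reintroduces the mirred deadlock potential described above, but restores correct serialization between configuration and enqueue until a generic fix lands.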
Diffstat (limited to 'net/sched/sch_red.c')
-rw-r--r--  net/sched/sch_red.c  6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index de2be4d04ed6..a79602f7fab8 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.prob_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
 		if (INET_ECN_set_ce(skb)) {
 			q->stats.forced_mark++;
-			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
 			if (!skb)
 				return NET_XMIT_CN | ret;
 		} else if (!red_use_nodrop(q)) {
@@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 	return ret;
 
 congestion_drop:
-	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
+	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
 	if (!skb)
 		return NET_XMIT_CN | ret;
 
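For the call-site picture: red_enqueue() runs with the qdisc root lock already held by the core transmit path, so after this patch that same lock also covers the qevent handling above. The sketch below is a simplified, hypothetical view of that caller relationship; the real logic is __dev_xmit_skb() in net/core/dev.c, which at this point in the tree's history still passes root_lock through to ->enqueue().

/* Hypothetical, simplified view of the caller side (the real code is
 * __dev_xmit_skb() in net/core/dev.c).  The root lock taken here is
 * what now stays held across tcf_qevent_handle() in red_enqueue().
 */
static int dev_xmit_skb_sketch(struct sk_buff *skb, struct Qdisc *q)
{
	spinlock_t *root_lock = qdisc_lock(q);
	struct sk_buff *to_free = NULL;
	int rc;

	spin_lock(root_lock);
	/* At this kernel version ->enqueue() still takes root_lock;
	 * red_enqueue() just no longer forwards it to
	 * tcf_qevent_handle().
	 */
	rc = q->enqueue(skb, q, root_lock, &to_free);
	spin_unlock(root_lock);

	if (unlikely(to_free))
		kfree_skb_list(to_free);
	return rc;
}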