author     Eric Dumazet <edumazet@google.com>               2019-10-24 08:44:48 +0300
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>  2019-11-10 13:34:43 +0300
commit     c7dba6a99aec3956f5c5b2c6a4cd1bf5fdc36a5e (patch)
tree       a515fc37b2fb179aca385de4d669fd8b9fce4f6c /include/linux/skbuff.h
parent     71e92518a9cea75a2e0d6dd8d74684bce9e4b231 (diff)
download   linux-c7dba6a99aec3956f5c5b2c6a4cd1bf5fdc36a5e.tar.xz
net: add skb_queue_empty_lockless()
[ Upstream commit d7d16a89350ab263484c0aa2b523dd3a234e4a80 ]
Some paths call skb_queue_empty() without holding
the queue lock. We must use a barrier to keep the
compiler from doing strange things, and to avoid
KCSAN splats.
Adding a barrier in skb_queue_empty() might be overkill,
so I prefer adding a new helper to clearly identify
the points where callers might be lockless. This might
help us find real bugs.
The corresponding WRITE_ONCE() should add zero cost
for current compilers.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
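
To illustrate the pattern this patch relies on, here is a minimal userspace
sketch. It is an analogue only: the kernel's READ_ONCE()/WRITE_ONCE() are
modeled with C11 relaxed atomics, and the struct node / queue_* names are
invented stand-ins for sk_buff / sk_buff_head, not kernel APIs. One thread
links a node into a circular list with WRITE_ONCE()-style stores while
another polls emptiness with a READ_ONCE()-style load; because both sides
use marked accesses, the check is data-race-free, which is what KCSAN
verifies.

/* Userspace analogue of skb_queue_empty_lockless()/__skb_insert().
 * All names are illustrative; relaxed atomics stand in for the
 * kernel's READ_ONCE()/WRITE_ONCE() macros. Build with -pthread.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct node {
	_Atomic(struct node *) next;
	_Atomic(struct node *) prev;
};

static struct node head;	/* circular list: empty when head.next == &head */

static bool queue_empty_lockless(void)
{
	/* analogue of: READ_ONCE(list->next) == (const struct sk_buff *) list */
	return atomic_load_explicit(&head.next, memory_order_relaxed) == &head;
}

static void queue_insert(struct node *newn, struct node *prev,
			 struct node *next)
{
	/* analogue of the four WRITE_ONCE() stores in __skb_insert() */
	atomic_store_explicit(&newn->next, next, memory_order_relaxed);
	atomic_store_explicit(&newn->prev, prev, memory_order_relaxed);
	atomic_store_explicit(&next->prev, newn, memory_order_relaxed);
	atomic_store_explicit(&prev->next, newn, memory_order_relaxed);
}

static void *producer(void *unused)
{
	static struct node n;

	(void)unused;
	/* insert at the head of the (currently empty) circular list */
	queue_insert(&n, &head,
		     atomic_load_explicit(&head.next, memory_order_relaxed));
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_init(&head.next, &head);
	atomic_init(&head.prev, &head);

	pthread_create(&t, NULL, producer, NULL);

	/* lockless reader: spins until the marked store becomes visible */
	while (queue_empty_lockless())
		;

	pthread_join(t, NULL);
	printf("queue became non-empty\n");
	return 0;
}

With a plain (non-atomic) read of head.next in that loop, the compiler
could legally hoist the load out of the loop, and the racing plain
accesses are exactly what KCSAN would flag.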
Diffstat (limited to 'include/linux/skbuff.h')
-rw-r--r--  include/linux/skbuff.h | 33 ++++++++++++++++++++++++---------
1 file changed, 24 insertions(+), 9 deletions(-)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b18d33681c2..39a7cecaa880 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1501,6 +1501,19 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
 }
 
 /**
+ * skb_queue_empty_lockless - check if a queue is empty
+ * @list: queue head
+ *
+ * Returns true if the queue is empty, false otherwise.
+ * This variant can be used in lockless contexts.
+ */
+static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
+{
+	return READ_ONCE(list->next) == (const struct sk_buff *) list;
+}
+
+
+/**
  * skb_queue_is_last - check if skb is the last entry in the queue
  * @list: queue head
  * @skb: buffer
@@ -1853,9 +1866,11 @@ static inline void __skb_insert(struct sk_buff *newsk,
 				struct sk_buff *prev, struct sk_buff *next,
 				struct sk_buff_head *list)
 {
-	newsk->next = next;
-	newsk->prev = prev;
-	next->prev = prev->next = newsk;
+	/* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+	WRITE_ONCE(newsk->next, next);
+	WRITE_ONCE(newsk->prev, prev);
+	WRITE_ONCE(next->prev, newsk);
+	WRITE_ONCE(prev->next, newsk);
 	list->qlen++;
 }
 
@@ -1866,11 +1881,11 @@ static inline void __skb_queue_splice(const struct sk_buff_head *list,
 	struct sk_buff *first = list->next;
 	struct sk_buff *last = list->prev;
 
-	first->prev = prev;
-	prev->next = first;
+	WRITE_ONCE(first->prev, prev);
+	WRITE_ONCE(prev->next, first);
 
-	last->next = next;
-	next->prev = last;
+	WRITE_ONCE(last->next, next);
+	WRITE_ONCE(next->prev, last);
 }
 
 /**
@@ -2011,8 +2026,8 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
 	next = skb->next;
 	prev = skb->prev;
 	skb->next = skb->prev = NULL;
-	next->prev = prev;
-	prev->next = next;
+	WRITE_ONCE(next->prev, prev);
+	WRITE_ONCE(prev->next, next);
 }
 
 /**
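
For context, the helper targets the kind of call site the commit message
describes: code that tests a queue for emptiness without holding the queue
lock, such as poll-style checks on a socket receive queue. The following
kernel-style sketch is hypothetical (sock_readable_sketch is not part of
this patch); it only illustrates where the new helper would replace a
plain skb_queue_empty() call.

/* Hypothetical caller sketch (not from this patch): checking a socket
 * receive queue without sk_receive_queue.lock held. The READ_ONCE()
 * inside skb_queue_empty_lockless() keeps this check data-race-free
 * against concurrent __skb_insert()/__skb_unlink() on other CPUs.
 */
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static __poll_t sock_readable_sketch(struct sock *sk, __poll_t mask)
{
	if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	return mask;
}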