author    Eric Dumazet <edumazet@google.com>      2019-10-24 08:44:51 +0300
committer David S. Miller <davem@davemloft.net>   2019-10-28 23:33:41 +0300
commit    3f926af3f4d688e2e11e7f8ed04e277a14d4d4a4
tree      3cdc7bbab6fc9caa1282afe98eddd91add1aabd7
parent    3ef7cf57c72f32f61e97f8fa401bc39ea1f1a5d4
net: use skb_queue_empty_lockless() in busy poll contexts
Busy polling usually runs without locks.
Let's use skb_queue_empty_lockless() instead of skb_queue_empty().

Also use READ_ONCE() in __skb_try_recv_datagram() to address
a similar potential problem.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
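For reference, the two helpers involved look roughly like this in include/linux/skbuff.h around this series (a sketch shown for context, not part of this patch); the lockless variant differs only in reading list->next through READ_ONCE(), so the compiler cannot tear, cache, or re-read the pointer while another CPU concurrently updates the queue:

/* Plain check: callers are expected to hold the queue lock or otherwise
 * serialize against writers.
 */
static inline int skb_queue_empty(const struct sk_buff_head *list)
{
	return list->next == (const struct sk_buff *)list;
}

/* Lockless check: a single READ_ONCE() of list->next makes the test safe
 * to run concurrently with enqueue/dequeue on another CPU.
 */
static inline bool skb_queue_empty_lockless(const struct sk_buff_head *list)
{
	return READ_ONCE(list->next) == (const struct sk_buff *)list;
}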
Diffstat (limited to 'net')
 net/core/datagram.c | 2 +-
 net/core/sock.c     | 2 +-
 net/ipv4/tcp.c      | 2 +-
 net/sctp/socket.c   | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/net/core/datagram.c b/net/core/datagram.c
index 5b685e110aff..03515e46a49a 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -278,7 +278,7 @@ struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned int flags,
 			break;
 		sk_busy_loop(sk, flags & MSG_DONTWAIT);
-	} while (sk->sk_receive_queue.prev != *last);
+	} while (READ_ONCE(sk->sk_receive_queue.prev) != *last);
 	error = -EAGAIN;
diff --git a/net/core/sock.c b/net/core/sock.c
index a515392ba84b..b8e758bcb6ad 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -3600,7 +3600,7 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
 	struct sock *sk = p;
-	return !skb_queue_empty(&sk->sk_receive_queue) ||
+	return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
 	       sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
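For context on why this callback needs the lockless check: sk_busy_loop_end() is the termination test that napi_busy_loop() invokes between polling rounds, with no socket lock and no sk_receive_queue.lock held. A simplified sketch of the call path, adapted from include/net/busy_poll.h of this era (shown for orientation, not part of this diff):

/* Simplified; the real helper is guarded by CONFIG_NET_RX_BUSY_POLL. */
static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
	unsigned int napi_id = READ_ONCE(sk->sk_napi_id);

	if (napi_id >= MIN_NAPI_ID)
		/* napi_busy_loop() polls the NAPI context in a loop and calls
		 * sk_busy_loop_end(sk, start_time) after each round to decide
		 * whether to keep spinning; nothing here serializes against
		 * the receive queue, hence skb_queue_empty_lockless().
		 */
		napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
}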
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ffef502f5292..d8876f0e9672 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1964,7 +1964,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 	if (unlikely(flags & MSG_ERRQUEUE))
 		return inet_recv_error(sk, msg, len, addr_len);
-	if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
+	if (sk_can_busy_loop(sk) && skb_queue_empty_lockless(&sk->sk_receive_queue) &&
 	    (sk->sk_state == TCP_ESTABLISHED))
 		sk_busy_loop(sk, nonblock);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index cfb25391b8b0..ca81e06df165 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -8871,7 +8871,7 @@ struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
 		if (sk_can_busy_loop(sk)) {
 			sk_busy_loop(sk, noblock);
-			if (!skb_queue_empty(&sk->sk_receive_queue))
+			if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
 				continue;
 		}