author    | Eric Dumazet <edumazet@google.com>    | 2016-11-17 01:54:50 +0300
committer | David S. Miller <davem@davemloft.net> | 2016-11-17 02:32:02 +0300
commit    | 89c4b442b78bdba388337cc746fe63caba85f46c (patch)
tree      | 825a8d6e04cdc937950e9d8a79b52eeb249236c3 /net/core
parent    | 1629dd4f763cc15ac3b2711ac65dab153b738c6d (diff)
download  | linux-89c4b442b78bdba388337cc746fe63caba85f46c.tar.xz
netpoll: more efficient locking
Callers of netpoll_poll_lock() own NAPI_STATE_SCHED.
Callers of netpoll_poll_unlock() have BH blocked between the point where
NAPI_STATE_SCHED is cleared and the point where poll_lock is released.

We can therefore avoid the spinlock, which sees no contention, and instead
use cmpxchg() on poll_owner, which we need to set anyway.

This also removes a possible lockdep violation present since the cited commit,
since sk_busy_loop() re-enables BH before calling busy_poll_stop().
Fixes: 217f69743681 ("net: busy-poll: allow preemption in sk_busy_loop()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
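
The scheme boils down to claiming a NAPI context by atomically swapping poll_owner from -1 to the local CPU id, and releasing it by storing -1 back with release semantics. Below is a minimal userspace C11 sketch of that idea (illustration only; fake_napi, poll_owner_try_claim() and poll_owner_release() are made-up names, and the kernel uses cmpxchg() and smp_store_release() directly on napi->poll_owner):

#include <stdatomic.h>
#include <stdbool.h>

#define NO_OWNER (-1)

struct fake_napi {
	_Atomic int poll_owner;		/* -1 means "unowned" */
};

/* Try to claim the context for this CPU; mirrors the cmpxchg() in poll_napi(). */
static bool poll_owner_try_claim(struct fake_napi *napi, int cpu)
{
	int expected = NO_OWNER;

	/* acquire on success so the new owner observes the previous owner's writes */
	return atomic_compare_exchange_strong_explicit(&napi->poll_owner,
						       &expected, cpu,
						       memory_order_acquire,
						       memory_order_relaxed);
}

/* Drop ownership; mirrors smp_store_release(&napi->poll_owner, -1). */
static void poll_owner_release(struct fake_napi *napi)
{
	atomic_store_explicit(&napi->poll_owner, NO_OWNER, memory_order_release);
}

Since poll_owner had to be written on every lock/unlock anyway, folding the mutual exclusion into that same word lets the separate poll_lock spinlock be removed entirely.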
Diffstat (limited to 'net/core')
-rw-r--r-- | net/core/dev.c     | 1
-rw-r--r-- | net/core/netpoll.c | 6
2 files changed, 3 insertions, 4 deletions
diff --git a/net/core/dev.c b/net/core/dev.c
index edba9efeb2e9..f71b34ab57a5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5143,7 +5143,6 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	list_add(&napi->dev_list, &dev->napi_list);
 	napi->dev = dev;
 #ifdef CONFIG_NETPOLL
-	spin_lock_init(&napi->poll_lock);
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 53599bd0c82d..9424673009c1 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -171,12 +171,12 @@ static void poll_one_napi(struct napi_struct *napi)
 
 static void poll_napi(struct net_device *dev)
 {
 	struct napi_struct *napi;
+	int cpu = smp_processor_id();
 
 	list_for_each_entry(napi, &dev->napi_list, dev_list) {
-		if (napi->poll_owner != smp_processor_id() &&
-		    spin_trylock(&napi->poll_lock)) {
+		if (cmpxchg(&napi->poll_owner, -1, cpu) == -1) {
 			poll_one_napi(napi);
-			spin_unlock(&napi->poll_lock);
+			smp_store_release(&napi->poll_owner, -1);
 		}
 	}
 }
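
The diffstat above is limited to 'net/core', so the matching change to netpoll_poll_lock()/netpoll_poll_unlock() in include/linux/netpoll.h is not shown here. A rough sketch of what that side presumably looks like after this patch (an assumption based on the commit message, not the actual hunk):

/* Sketch only: the include/linux/netpoll.h half of this patch is outside the
 * net/core diffstat above; this is a guess at its shape, not the real code.
 */
static inline void *netpoll_poll_lock(struct napi_struct *napi)
{
	struct net_device *dev = napi->dev;

	if (dev && dev->npinfo) {
		int owner = smp_processor_id();

		/* spin until this CPU owns the context; replaces spin_lock(&napi->poll_lock) */
		while (cmpxchg(&napi->poll_owner, -1, owner) != -1)
			cpu_relax();

		return napi;
	}
	return NULL;
}

static inline void netpoll_poll_unlock(void *have)
{
	struct napi_struct *napi = have;

	/* pairs with the cmpxchg() above and in poll_napi() */
	if (napi)
		smp_store_release(&napi->poll_owner, -1);
}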