author    Kirill Tkhai <ktkhai@virtuozzo.com>    2018-03-22 12:45:40 +0300
committer David S. Miller <davem@davemloft.net>  2018-03-22 22:12:56 +0300
commit    d9ff3049739e349b5380b96226f9ad766741773d (patch)
tree      0b09f0de0ff410fef91ad0224a241b322bd0eb05 /net/ipv4
parent    5796ef75ec7b6019eac88f66751d663d537a5cd3 (diff)
download  linux-d9ff3049739e349b5380b96226f9ad766741773d.tar.xz
net: Replace ip_ra_lock with per-net mutex
Since ra_chain is per-net, we may use a per-net mutex to protect it in ip_ra_control(). This improves scalability.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
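The change relies on per-net state introduced earlier in the series: ra_chain and the new ra_mutex both live in struct netns_ipv4, and the mutex must be initialized when the namespace is created. Below is a minimal sketch of that assumed layout; the field placement matches what the diff dereferences (net->ipv4.ra_mutex), but the init function name is a placeholder, not taken from this patch.

	/* include/net/netns/ipv4.h (assumed layout, sketch) */
	struct netns_ipv4 {
		/* ... */
		struct ip_ra_chain __rcu *ra_chain;	/* IP_ROUTER_ALERT sockets, read under RCU */
		struct mutex		  ra_mutex;	/* serializes writers in ip_ra_control() */
		/* ... */
	};

	/* Per-netns setup (sketch; the real hook name may differ) */
	static __net_init int ra_example_init_net(struct net *net)
	{
		mutex_init(&net->ipv4.ra_mutex);
		return 0;
	}

With one mutex per struct net, ip_ra_control() calls in different namespaces no longer contend on a single global lock, which is the scalability improvement the changelog refers to.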
Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/ip_sockglue.c   15
1 file changed, 6 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index f36d35fe924b..5ad2d8ed3a3f 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -322,9 +322,6 @@ int ip_cmsg_send(struct sock *sk, struct msghdr *msg, struct ipcm_cookie *ipc,
return 0;
}
-static DEFINE_SPINLOCK(ip_ra_lock);
-
-
static void ip_ra_destroy_rcu(struct rcu_head *head)
{
struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
@@ -345,21 +342,21 @@ int ip_ra_control(struct sock *sk, unsigned char on,
new_ra = on ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
- spin_lock_bh(&ip_ra_lock);
+ mutex_lock(&net->ipv4.ra_mutex);
for (rap = &net->ipv4.ra_chain;
(ra = rcu_dereference_protected(*rap,
- lockdep_is_held(&ip_ra_lock))) != NULL;
+ lockdep_is_held(&net->ipv4.ra_mutex))) != NULL;
rap = &ra->next) {
if (ra->sk == sk) {
if (on) {
- spin_unlock_bh(&ip_ra_lock);
+ mutex_unlock(&net->ipv4.ra_mutex);
kfree(new_ra);
return -EADDRINUSE;
}
/* dont let ip_call_ra_chain() use sk again */
ra->sk = NULL;
RCU_INIT_POINTER(*rap, ra->next);
- spin_unlock_bh(&ip_ra_lock);
+ mutex_unlock(&net->ipv4.ra_mutex);
if (ra->destructor)
ra->destructor(sk);
@@ -374,7 +371,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
}
}
if (!new_ra) {
- spin_unlock_bh(&ip_ra_lock);
+ mutex_unlock(&net->ipv4.ra_mutex);
return -ENOBUFS;
}
new_ra->sk = sk;
@@ -383,7 +380,7 @@ int ip_ra_control(struct sock *sk, unsigned char on,
RCU_INIT_POINTER(new_ra->next, ra);
rcu_assign_pointer(*rap, new_ra);
sock_hold(sk);
- spin_unlock_bh(&ip_ra_lock);
+ mutex_unlock(&net->ipv4.ra_mutex);
return 0;
}
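Replacing spin_lock_bh() with a mutex is viable here because ip_ra_control() runs in process context from the setsockopt() paths (IP_ROUTER_ALERT and the multicast routing code), where sleeping is allowed. The mutex serializes writers only; readers such as ip_call_ra_chain() keep walking the chain under RCU, which is why the unlink above uses RCU_INIT_POINTER() and frees the entry via an RCU callback. A rough sketch of that reader-side pattern, simplified rather than quoted from net/ipv4/ip_input.c:

	/* Reader side (sketch): traversal needs only rcu_read_lock(),
	 * never ra_mutex, so packet delivery is not blocked by
	 * concurrent ip_ra_control() calls.
	 */
	struct ip_ra_chain *ra;

	rcu_read_lock();
	for (ra = rcu_dereference(net->ipv4.ra_chain); ra;
	     ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		if (sk) {
			/* hand a copy of the packet to this raw socket */
		}
	}
	rcu_read_unlock();

Because readers never take ra_mutex, converting the writer-side lock to a sleeping mutex does not affect the packet fast path.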