Diffstat (limited to 'net/core/sock.c')
-rw-r--r--	net/core/sock.c	75
1 file changed, 70 insertions(+), 5 deletions(-)
diff --git a/net/core/sock.c b/net/core/sock.c
index 9e5b71fda6ec..8f67ced8d6a8 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -142,7 +142,7 @@
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#ifdef CONFIG_MEMCG_KMEM
 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
 	struct proto *proto;
@@ -271,6 +271,61 @@ __u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
 int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
 EXPORT_SYMBOL(sysctl_optmem_max);
 
+struct static_key memalloc_socks = STATIC_KEY_INIT_FALSE;
+EXPORT_SYMBOL_GPL(memalloc_socks);
+
+/**
+ * sk_set_memalloc - sets %SOCK_MEMALLOC
+ * @sk: socket to set it on
+ *
+ * Set %SOCK_MEMALLOC on a socket for access to emergency reserves.
+ * It's the responsibility of the admin to adjust min_free_kbytes
+ * to meet the requirements
+ */
+void sk_set_memalloc(struct sock *sk)
+{
+	sock_set_flag(sk, SOCK_MEMALLOC);
+	sk->sk_allocation |= __GFP_MEMALLOC;
+	static_key_slow_inc(&memalloc_socks);
+}
+EXPORT_SYMBOL_GPL(sk_set_memalloc);
+
+void sk_clear_memalloc(struct sock *sk)
+{
+	sock_reset_flag(sk, SOCK_MEMALLOC);
+	sk->sk_allocation &= ~__GFP_MEMALLOC;
+	static_key_slow_dec(&memalloc_socks);
+
+	/*
+	 * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
+	 * progress of swapping. However, if SOCK_MEMALLOC is cleared while
+	 * it has rmem allocations there is a risk that the user of the
+	 * socket cannot make forward progress due to exceeding the rmem
+	 * limits. By rights, sk_clear_memalloc() should only be called
+	 * on sockets being torn down but warn and reset the accounting if
+	 * that assumption breaks.
+	 */
+	if (WARN_ON(sk->sk_forward_alloc))
+		sk_mem_reclaim(sk);
+}
+EXPORT_SYMBOL_GPL(sk_clear_memalloc);
+
+int __sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	int ret;
+	unsigned long pflags = current->flags;
+
+	/* these should have been dropped before queueing */
+	BUG_ON(!sock_flag(sk, SOCK_MEMALLOC));
+
+	current->flags |= PF_MEMALLOC;
+	ret = sk->sk_backlog_rcv(sk, skb);
+	tsk_restore_flags(current, pflags, PF_MEMALLOC);
+
+	return ret;
+}
+EXPORT_SYMBOL(__sk_backlog_rcv);
+
 #if defined(CONFIG_CGROUPS)
 #if !defined(CONFIG_NET_CLS_CGROUP)
 int net_cls_subsys_id = -1;
@@ -353,7 +408,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	if (err)
 		return err;
 
-	if (!sk_rmem_schedule(sk, skb->truesize)) {
+	if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
 		atomic_inc(&sk->sk_drops);
 		return -ENOBUFS;
 	}
@@ -1180,12 +1235,12 @@ void sock_update_classid(struct sock *sk)
 }
 EXPORT_SYMBOL(sock_update_classid);
 
-void sock_update_netprioidx(struct sock *sk)
+void sock_update_netprioidx(struct sock *sk, struct task_struct *task)
 {
 	if (in_interrupt())
 		return;
 
-	sk->sk_cgrp_prioidx = task_netprioidx(current);
+	sk->sk_cgrp_prioidx = task_netprioidx(task);
 }
 EXPORT_SYMBOL_GPL(sock_update_netprioidx);
 #endif
@@ -1215,7 +1270,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
 		atomic_set(&sk->sk_wmem_alloc, 1);
 
 		sock_update_classid(sk);
-		sock_update_netprioidx(sk);
+		sock_update_netprioidx(sk, current);
 	}
 
 	return sk;
@@ -1403,6 +1458,7 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			sk->sk_gso_max_size = dst->dev->gso_max_size;
+			sk->sk_gso_max_segs = dst->dev->gso_max_segs;
 		}
 	}
 }
@@ -1465,6 +1521,11 @@ void sock_rfree(struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sock_rfree);
 
+void sock_edemux(struct sk_buff *skb)
+{
+	sock_put(skb->sk);
+}
+EXPORT_SYMBOL(sock_edemux);
 
 int sock_i_uid(struct sock *sk)
 {
@@ -2154,6 +2215,10 @@ void release_sock(struct sock *sk)
 	spin_lock_bh(&sk->sk_lock.slock);
 	if (sk->sk_backlog.tail)
 		__release_sock(sk);
+
+	if (sk->sk_prot->release_cb)
+		sk->sk_prot->release_cb(sk);
+
 	sk->sk_lock.owned = 0;
 	if (waitqueue_active(&sk->sk_lock.wq))
 		wake_up(&sk->sk_lock.wq);
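
Note: the memalloc_socks static key and __sk_backlog_rcv() above only make sense
together with the fast-path wrapper that callers actually invoke, which lives in
include/net/sock.h and is therefore not part of this diff. The sketch below is an
assumption of its rough shape, not code quoted from this commit. A user such as a
swap-over-network transport would call sk_set_memalloc() on its socket when a
swap file is activated and sk_clear_memalloc() on deactivation.

	/* Sketch (assumed, not from this diff): the static key keeps the
	 * SOCK_MEMALLOC test off the fast path; until sk_set_memalloc()
	 * flips the key for the first socket, the branch is patched out.
	 */
	static inline bool sk_memalloc_socks(void)
	{
		return static_key_false(&memalloc_socks);
	}

	static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
	{
		/* only SOCK_MEMALLOC sockets take the PF_MEMALLOC slow path */
		if (sk_memalloc_socks() && unlikely(sock_flag(sk, SOCK_MEMALLOC)))
			return __sk_backlog_rcv(sk, skb);

		return sk->sk_backlog_rcv(sk, skb);
	}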

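The final hunk gives struct proto an optional release_cb hook, invoked from
release_sock() under the backlog spinlock but before the lock is marked unowned,
so a protocol can run work it had to defer while another context held the socket
lock. A hypothetical consumer might look like the sketch below; every myproto_*
name and the deferred-flags field are made up for illustration only.

	/* Hypothetical protocol-private state; not part of this commit. */
	#define MYPROTO_TIMER_DEFERRED	0

	struct myproto_sock {
		struct sock	sk;		/* must be first */
		unsigned long	deferred_flags;
	};

	static void myproto_release_cb(struct sock *sk)
	{
		struct myproto_sock *mp = (struct myproto_sock *)sk;

		/* run timer work postponed while the socket lock was owned */
		if (test_and_clear_bit(MYPROTO_TIMER_DEFERRED,
				       &mp->deferred_flags))
			myproto_handle_timer(sk);	/* hypothetical */
	}

	static struct proto myproto_prot = {
		.name		= "MYPROTO",
		.owner		= THIS_MODULE,
		.release_cb	= myproto_release_cb,
		/* remaining proto callbacks elided */
	};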