diff options
Diffstat (limited to 'net/ipv4/tcp.c')
| -rw-r--r-- | net/ipv4/tcp.c | 27 | 
1 file changed, 11 insertions, 16 deletions
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 6e5617b9f9db..c4638e6f0238 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -288,9 +288,11 @@ int sysctl_tcp_min_tso_segs __read_mostly = 2;  struct percpu_counter tcp_orphan_count;  EXPORT_SYMBOL_GPL(tcp_orphan_count); +long sysctl_tcp_mem[3] __read_mostly;  int sysctl_tcp_wmem[3] __read_mostly;  int sysctl_tcp_rmem[3] __read_mostly; +EXPORT_SYMBOL(sysctl_tcp_mem);  EXPORT_SYMBOL(sysctl_tcp_rmem);  EXPORT_SYMBOL(sysctl_tcp_wmem); @@ -806,12 +808,6 @@ static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,  		xmit_size_goal = min_t(u32, gso_size,  				       sk->sk_gso_max_size - 1 - hlen); -		/* TSQ : try to have at least two segments in flight -		 * (one in NIC TX ring, another in Qdisc) -		 */ -		xmit_size_goal = min_t(u32, xmit_size_goal, -				       sysctl_tcp_limit_output_bytes >> 1); -  		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);  		/* We try hard to avoid divides here */ @@ -1429,7 +1425,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)  	do {  		if (dma_async_is_tx_complete(tp->ucopy.dma_chan,  					      last_issued, &done, -					      &used) == DMA_SUCCESS) { +					      &used) == DMA_COMPLETE) {  			/* Safe to free early-copied skbs now */  			__skb_queue_purge(&sk->sk_async_wait_queue);  			break; @@ -1437,7 +1433,7 @@ static void tcp_service_net_dma(struct sock *sk, bool wait)  			struct sk_buff *skb;  			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&  			       (dma_async_is_complete(skb->dma_cookie, done, -						      used) == DMA_SUCCESS)) { +						      used) == DMA_COMPLETE)) {  				__skb_dequeue(&sk->sk_async_wait_queue);  				kfree_skb(skb);  			} @@ -3097,13 +3093,13 @@ static int __init set_thash_entries(char *str)  }  __setup("thash_entries=", set_thash_entries); -void tcp_init_mem(struct net *net) +static void tcp_init_mem(void)  {  	unsigned long limit = nr_free_buffer_pages() / 8;  	limit = max(limit, 128UL); -	
net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; -	net->ipv4.sysctl_tcp_mem[1] = limit; -	net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; +	sysctl_tcp_mem[0] = limit / 4 * 3; +	sysctl_tcp_mem[1] = limit; +	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;  }  void __init tcp_init(void) @@ -3137,10 +3133,9 @@ void __init tcp_init(void)  					&tcp_hashinfo.ehash_mask,  					0,  					thash_entries ? 0 : 512 * 1024); -	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { +	for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)  		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); -		INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); -	} +  	if (inet_ehash_locks_alloc(&tcp_hashinfo))  		panic("TCP: failed to alloc ehash_locks");  	tcp_hashinfo.bhash = @@ -3166,7 +3161,7 @@ void __init tcp_init(void)  	sysctl_tcp_max_orphans = cnt / 2;  	sysctl_max_syn_backlog = max(128, cnt / 256); -	tcp_init_mem(&init_net); +	tcp_init_mem();  	/* Set per-socket limits to no more than 1/128 the pressure threshold */  	limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);  	max_wshare = min(4UL*1024*1024, limit);  | 
