| | | |
|---|---|---|
| author | Jiri Kosina <jkosina@suse.cz> | 2013-09-04 12:49:39 +0400 |
| committer | Jiri Kosina <jkosina@suse.cz> | 2013-09-04 12:49:57 +0400 |
| commit | efd15f5f4ff63f6ac5d80850686e3d2cc8c4481b (patch) | |
| tree | 40024adbe77a3d660662e639fd765097133d648c /include/net/sock.h | |
| parent | 6c2794a2984f4c17a58117a68703cc7640f01c5a (diff) | |
| parent | 58c59bc997d86593f0bea41845885917cf304d22 (diff) | |
| download | linux-efd15f5f4ff63f6ac5d80850686e3d2cc8c4481b.tar.xz | |
Merge branch 'master' into for-3.12/upstream
Sync with Linus' tree to be able to apply fixup patch on top
of 9d9a04ee75 ("HID: apple: Add support for the 2013 Macbook Air")
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
Diffstat (limited to 'include/net/sock.h')
| | | |
|---|---|---|
| -rw-r--r-- | include/net/sock.h | 21 |
1 files changed, 15 insertions, 6 deletions
```diff
diff --git a/include/net/sock.h b/include/net/sock.h
index 66772cf8c3c5..31d5cfbb51ec 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -229,6 +229,8 @@ struct cg_proto;
  *	@sk_omem_alloc: "o" is "option" or "other"
  *	@sk_wmem_queued: persistent queue size
  *	@sk_forward_alloc: space allocated forward
+ *	@sk_napi_id: id of the last napi context to receive data for sk
+ *	@sk_ll_usec: usecs to busypoll when there is no data
  *	@sk_allocation: allocation mode
  *	@sk_sndbuf: size of send buffer in bytes
  *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
@@ -325,6 +327,10 @@ struct sock {
 #ifdef CONFIG_RPS
 	__u32			sk_rxhash;
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+	unsigned int		sk_napi_id;
+	unsigned int		sk_ll_usec;
+#endif
 	atomic_t		sk_drops;
 	int			sk_rcvbuf;
 
@@ -2041,18 +2047,21 @@ static inline void sk_wake_async(struct sock *sk, int how, int band)
 	sock_wake_async(sk->sk_socket, how, band);
 }
 
-#define SOCK_MIN_SNDBUF 2048
-/*
- * Since sk_rmem_alloc sums skb->truesize, even a small frame might need
- * sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak
+/* Since sk_{r,w}mem_alloc sums skb->truesize, even a small frame might
+ * need sizeof(sk_buff) + MTU + padding, unless net driver perform copybreak.
+ * Note: for send buffers, TCP works better if we can build two skbs at
+ * minimum.
  */
-#define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff))
+#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(sizeof(struct sk_buff)))
+
+#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
+#define SOCK_MIN_RCVBUF		TCP_SKB_MIN_TRUESIZE
 
 static inline void sk_stream_moderate_sndbuf(struct sock *sk)
 {
 	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
 		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
-		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
+		sk->sk_sndbuf = max_t(u32, sk->sk_sndbuf, SOCK_MIN_SNDBUF);
 	}
 }
```
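The hunk above derives the SOCK_MIN_* floors from the true size of a minimal skb instead of hard-coding 2048, and doubles the send-buffer floor so TCP can always queue two such skbs. The sketch below is a standalone, userspace illustration of that arithmetic only, not kernel code: the 240-byte struct sk_buff and the 64-byte cache line are assumed values chosen for demonstration, and SKB_DATA_ALIGN is re-declared here just to keep the example self-contained.

```c
#include <stdio.h>

/* Assumed values, purely for illustration; the real numbers depend on the
 * kernel's sizeof(struct sk_buff) and SMP_CACHE_BYTES for the build.
 */
#define SMP_CACHE_BYTES		64	/* assumed L1 cache line size */
#define SKB_SIZE		240	/* assumed sizeof(struct sk_buff) */

/* Round up to the next cache-line multiple, mirroring the kernel macro. */
#define SKB_DATA_ALIGN(x)	(((x) + (SMP_CACHE_BYTES - 1)) & \
				 ~(SMP_CACHE_BYTES - 1))

/* Same shape as the new definitions in the diff. */
#define TCP_SKB_MIN_TRUESIZE	(2048 + SKB_DATA_ALIGN(SKB_SIZE))
#define SOCK_MIN_SNDBUF		(TCP_SKB_MIN_TRUESIZE * 2)
#define SOCK_MIN_RCVBUF		TCP_SKB_MIN_TRUESIZE

int main(void)
{
	/* With the assumptions above: 2048 + 256 = 2304 bytes of truesize
	 * per minimal skb, so the send-buffer floor covers two full skbs
	 * (4608 bytes) while the receive floor covers one.
	 */
	printf("TCP_SKB_MIN_TRUESIZE = %d\n", TCP_SKB_MIN_TRUESIZE);
	printf("SOCK_MIN_SNDBUF      = %d\n", SOCK_MIN_SNDBUF);
	printf("SOCK_MIN_RCVBUF      = %d\n", SOCK_MIN_RCVBUF);
	return 0;
}
```

The switch from max() to max_t(u32, ...) in sk_stream_moderate_sndbuf() fits the same change: once SOCK_MIN_SNDBUF contains a sizeof() it is no longer a plain int constant, so the strictly type-checked kernel max() would presumably complain when compared against the int sk_sndbuf, whereas max_t() casts both operands to u32.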