author:    Eric Dumazet <edumazet@google.com>    2019-02-12 23:26:27 +0300
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2019-12-05 17:37:58 +0300
commit:    75fa3a9a2f7b6daf266c6cbbecdd83fbcb9dad6b
tree:      05f56b32bc0539c8d9cc47712ed68d2b5f77c399
parent:    50740980d526e97eea76ed0ff54b1935ed971565
net: fix possible overflow in __sk_mem_raise_allocated()
[ Upstream commit 5bf325a53202b8728cf7013b72688c46071e212e ]
With many active TCP sockets, fat TCP sockets (those holding a lot of
memory) could fool __sk_mem_raise_allocated() through an integer
overflow: instead of having their share of the memory decreased under
pressure, they would be allowed to increase it.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <sashal@kernel.org>
 include/net/sock.h | 2 +-
 net/core/sock.c    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/include/net/sock.h b/include/net/sock.h
index 780c6c0a86f0..0af46cbd3649 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1232,7 +1232,7 @@ static inline void sk_sockets_allocated_inc(struct sock *sk)
 	percpu_counter_inc(sk->sk_prot->sockets_allocated);
 }
 
-static inline int
+static inline u64
 sk_sockets_allocated_read_positive(struct sock *sk)
 {
 	return percpu_counter_read_positive(sk->sk_prot->sockets_allocated);
diff --git a/net/core/sock.c b/net/core/sock.c
index 7ccbcd853cbc..90ccbbf9e6b0 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2357,7 +2357,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
 	}
 
 	if (sk_has_memory_pressure(sk)) {
-		int alloc;
+		u64 alloc;
 
 		if (!sk_under_memory_pressure(sk))
			return 1;
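The sketch below is a minimal userspace illustration of the failure mode the patch addresses, not kernel code; the limit, socket count, and per-socket page count are invented. __sk_mem_raise_allocated() roughly compares a per-protocol page limit against the socket count multiplied by the pages the current socket wants; with a 32-bit signed alloc that multiply can wrap negative, so the "still under the limit" check passes for exactly the sockets it should throttle, while the 64-bit arithmetic introduced by the patch keeps the comparison correct.

/*
 * Minimal userspace sketch (not kernel code) of the overflow fixed above.
 * All numbers are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	long limit = 3000000;   /* hypothetical hard limit, in pages */
	int sockets = 100000;   /* many active sockets */
	int pages = 30000;      /* pages a "fat" socket wants to hold */

	/* Buggy variant: int * int overflows (undefined behaviour; in
	 * practice it wraps to a negative value), so limit > needed holds
	 * and the socket is allowed to keep growing. */
	int alloc32 = sockets;
	long needed32 = alloc32 * pages;

	/* Fixed variant: widen before multiplying, as the patch does by
	 * turning alloc and sk_sockets_allocated_read_positive() into u64. */
	uint64_t alloc64 = (uint64_t)sockets;
	uint64_t needed64 = alloc64 * (uint64_t)pages;

	printf("32-bit: needed = %ld pages, allowed = %s\n",
	       needed32, limit > needed32 ? "yes (wrong)" : "no");
	printf("64-bit: needed = %llu pages, allowed = %s\n",
	       (unsigned long long)needed64,
	       (uint64_t)limit > needed64 ? "yes" : "no (correct)");
	return 0;
}

On common toolchains where the signed overflow wraps, the 32-bit variant reports a negative page count and allows the allocation, while the 64-bit variant correctly rejects it.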