author		Eric Dumazet <edumazet@google.com>	2015-10-02 21:43:33 +0300
committer	David S. Miller <davem@davemloft.net>	2015-10-03 14:32:42 +0300
commit		81b496b31a4331415b6a644b485a329ec0b45155 (patch)
tree		0412d4d8c3d58625f838ad3fb6f7f044bfd9fcc4 /net
parent		079096f103faca2dd87342cca6f23d4b34da8871 (diff)
download	linux-81b496b31a4331415b6a644b485a329ec0b45155.tar.xz
tcp/dccp: shrink struct listen_sock
We no longer use hash_rnd, nr_table_entries or syn_table[].
For a listener with a backlog of 10 million sockets, this
saves 80 MBytes of vmalloced memory.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
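A quick sanity check of the figure in the changelog (not part of the commit; it assumes 64-bit pointers and takes the backlog at face value, ignoring the clamping to sysctl_max_syn_backlog and the power-of-two round-up done in reqsk_queue_alloc()): syn_table[] held one struct request_sock pointer per table entry, so a 10-million-entry backlog cost roughly 10,000,000 * 8 bytes, i.e. about 80 MBytes.

/* Back-of-the-envelope for the changelog number.  Userspace sketch,
 * not kernel code; assumes 8-byte pointers.
 */
#include <stdio.h>

int main(void)
{
	unsigned long backlog = 10UL * 1000 * 1000;     /* 10 million sockets */
	unsigned long bytes = backlog * sizeof(void *); /* one request_sock pointer per slot */

	printf("old syn_table[] footprint: ~%lu MB\n", bytes / 1000000);
	return 0;
}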
Diffstat (limited to 'net')
-rw-r--r--	net/core/request_sock.c	14
1 file changed, 3 insertions, 11 deletions
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index a4b305d8ca2b..124f61c5bfef 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -46,18 +46,11 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
 	nr_table_entries = max_t(u32, nr_table_entries, 8);
 	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
-	lopt_size += nr_table_entries * sizeof(struct request_sock *);
 
-	if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
-		lopt = kzalloc(lopt_size, GFP_KERNEL |
-					  __GFP_NOWARN |
-					  __GFP_NORETRY);
-	if (!lopt)
-		lopt = vzalloc(lopt_size);
+	lopt = kzalloc(lopt_size, GFP_KERNEL);
 	if (!lopt)
 		return -ENOMEM;
 
-	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
 
 	spin_lock_init(&queue->rskq_lock);
 	spin_lock_init(&queue->syn_wait_lock);
@@ -68,7 +61,6 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 	queue->fastopenq.max_qlen = 0;
 
 	queue->rskq_accept_head = NULL;
-	lopt->nr_table_entries = nr_table_entries;
 	lopt->max_qlen_log = ilog2(nr_table_entries);
 
 	spin_lock_bh(&queue->syn_wait_lock);
@@ -81,7 +73,7 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
 	/* This is an error recovery path only, no locking needed */
-	kvfree(queue->listen_opt);
+	kfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -102,7 +94,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
 
 	/* cleaning is done by req timers */
-	kvfree(lopt);
+	kfree(lopt);
 }
 
 /*
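For context on the diff above: struct listen_sock used to end in a syn_table[] flexible array holding one struct request_sock pointer per hash bucket, so its allocation scaled with the listen backlog and could exceed what the slab allocator handles comfortably. That is why reqsk_queue_alloc() used the kzalloc-then-vzalloc fallback removed here, and why the matching frees had to be kvfree(). Now that SYN_RECV requests live in the regular ehash table (see the parent commit), listen_sock is small and fixed-size, and a plain kzalloc()/kfree() pair suffices. Below is a minimal sketch of the removed fallback pattern; the helper name alloc_big_table() is made up for illustration.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Try the slab allocator first for sizes up to the "costly" order,
 * suppressing the OOM warning and retries, then fall back to vmalloc.
 * Memory obtained this way must be released with kvfree(), which
 * dispatches to kfree() or vfree() as appropriate.
 */
static void *alloc_big_table(size_t size)
{
	void *p = NULL;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!p)
		p = vzalloc(size);
	return p;
}

Once the structure no longer grows with the backlog, this machinery, and kvfree() on the release side, is unnecessary, which is exactly what the patch drops.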