author		Eric Dumazet <edumazet@google.com>	2015-09-29 17:42:52 +0300
committer	David S. Miller <davem@davemloft.net>	2015-09-30 02:53:10 +0300
commit		0536fcc039a8926ec12ec587f41a83f7acafeb82 (patch)
tree		335da15ef581d750ada2ba11e6ca6900ba82ae86 /include
parent		2985aaac010ebd5e562ce1a22cc61acbb0e40cf2 (diff)
download	linux-0536fcc039a8926ec12ec587f41a83f7acafeb82.tar.xz
tcp: prepare fastopen code for upcoming listener changes
While auditing the TCP stack for the upcoming 'lockless' listener changes, I found I had to change fastopen_init_queue() to properly init the object before publishing it. Otherwise another cpu could try to lock the spinlock before it gets properly initialized.

Instead of adding appropriate barriers, just remove the dynamic memory allocation:

- The structure is 28 bytes on 64bit arches. Using an additional 8 bytes for holding a pointer seems overkill.
- Two listeners can share the same cache line and performance would suffer.

If we really want to save a few bytes, we would instead dynamically allocate the whole struct request_sock_queue in the future.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
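To make the race concrete, here is a minimal sketch (illustrative only, not part of the commit) of the hazard the embedded structure removes. CPU A follows the store order of the old fastopen_init_queue(); the pointer store is what publishes the object:

/* CPU A, simplified from the old fastopen_init_queue(): */
queue->fastopenq = kzalloc(sizeof(struct fastopen_queue),
			   sk->sk_allocation);	/* pointer visible here... */
sk->sk_destruct = tcp_sock_destruct;
spin_lock_init(&queue->fastopenq->lock);	/* ...lock initialized later */

/* CPU B, racing on the same listener, may observe the early pointer
 * and take a spinlock that was never initialized: */
if (queue->fastopenq != NULL)
	spin_lock(&queue->fastopenq->lock);

With the structure embedded in struct request_sock_queue there is no pointer to publish at all: the spinlock can be initialized once, when the listener's accept queue itself is set up.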
Diffstat (limited to 'include')
-rw-r--r--	include/linux/tcp.h	22
-rw-r--r--	include/net/request_sock.h	7
2 files changed, 6 insertions(+), 23 deletions(-)
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fcb573be75d9..e442e6e9a365 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -382,25 +382,11 @@ static inline bool tcp_passive_fastopen(const struct sock *sk)
 		tcp_sk(sk)->fastopen_rsk != NULL);
 }
 
-extern void tcp_sock_destruct(struct sock *sk);
-
-static inline int fastopen_init_queue(struct sock *sk, int backlog)
+static inline void fastopen_queue_tune(struct sock *sk, int backlog)
 {
-	struct request_sock_queue *queue =
-			&inet_csk(sk)->icsk_accept_queue;
-
-	if (queue->fastopenq == NULL) {
-		queue->fastopenq = kzalloc(
-			sizeof(struct fastopen_queue),
-			sk->sk_allocation);
-		if (queue->fastopenq == NULL)
-			return -ENOMEM;
-
-		sk->sk_destruct = tcp_sock_destruct;
-		spin_lock_init(&queue->fastopenq->lock);
-	}
-	queue->fastopenq->max_qlen = backlog;
-	return 0;
+	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+	queue->fastopenq.max_qlen = backlog;
 }
 
 static inline void tcp_saved_syn_free(struct tcp_sock *tp)
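Since the helper is now void and cannot fail, callers lose their -ENOMEM handling. A hypothetical sketch of a listen()-path caller (the real call-site updates live outside include/ and are not shown in this diffstat-limited view):

/* Hypothetical caller sketch, not part of this diff. */
static int example_listen_path(struct sock *sk, int backlog)
{
	/* before: if (fastopen_init_queue(sk, backlog)) return -ENOMEM; */
	fastopen_queue_tune(sk, backlog);	/* void: cannot fail */
	return 0;
}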
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index c146b5284786..d2544de329bd 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -180,11 +180,8 @@ struct request_sock_queue {
 	struct request_sock	*rskq_accept_tail;
 	u8			rskq_defer_accept;
 	struct listen_sock	*listen_opt;
-	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
-					     * enabled on this listener. Check
-					     * max_qlen != 0 in fastopen_queue
-					     * to determine if TFO is enabled
-					     * right at this moment.
+	struct fastopen_queue	fastopenq;  /* Check max_qlen != 0 to determine
+					     * if TFO is enabled.
 					     */
 
 	/* temporary alignment, our goal is to get rid of this lock */
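Per the new comment, "is TFO enabled?" turns from a pointer test into a field test. A sketch of what such a check looks like after this change (the helper name is hypothetical, not part of the commit):

/* Hypothetical helper: TFO is considered enabled on a listener when
 * max_qlen is non-zero, replacing the old fastopenq != NULL test. */
static inline bool example_tfo_enabled(const struct sock *sk)
{
	return inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen != 0;
}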