author     D. Wythe <alibuda@linux.alibaba.com>    2022-02-10 12:11:34 +0300
committer  David S. Miller <davem@davemloft.net>   2022-02-11 14:14:57 +0300
commit     3079e342d2655c127dc700bfc1ea67382dff455c (patch)
tree       bdfa206a6866d4b57dbe2aeed6d6ffe411a1b0b1 /net/smc
parent     429c83c78ab213d9656721cf46ae05366098485c (diff)
download   linux-3079e342d2655c127dc700bfc1ea67382dff455c.tar.xz
net/smc: Make smc_tcp_listen_work() independent
In a multithreaded benchmark with 10K connections, the backend TCP
connections are established very slowly, and many TCP connections stay in
the SYN_SENT state.
Client: smc_run wrk -c 10000 -t 4 http://server
the netstat counters on the server host show:
145042 times the listen queue of a socket overflowed
145042 SYNs to LISTEN sockets dropped
One reason for this issue is that smc_tcp_listen_work() shares the same
workqueue (smc_hs_wq) with smc_listen_work(), and smc_listen_work() blocks
while waiting for the SMC connection to be established. Once the workqueue
becomes congested, it blocks accept() on the TCP listen socket.
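To make the congestion concrete, below is a minimal, self-contained sketch (a
hypothetical test module; the names are invented and it is not part of this
patch). Work items that sleep keep occupying the workqueue's max_active slots
(by default 256 per CPU), so a fast item queued behind enough sleeping items
sits on the pending list, just as tcp_listen_work sits behind blocked
smc_listen_work items on smc_hs_wq:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/delay.h>

static struct workqueue_struct *shared_wq;

/* Stands in for smc_listen_work(): blocks until the handshake completes. */
static void slow_fn(struct work_struct *work)
{
        msleep(5000);
}

/* Stands in for smc_tcp_listen_work(): should run promptly, but cannot. */
static void fast_fn(struct work_struct *work)
{
        pr_info("fast work finally ran\n");
}

static struct work_struct slow_works[512];
static DECLARE_WORK(fast_work, fast_fn);

static int __init starve_init(void)
{
        int i;

        shared_wq = alloc_workqueue("shared_wq", 0, 0);
        if (!shared_wq)
                return -ENOMEM;

        /* Fill the workqueue's concurrency limit with sleeping items ... */
        for (i = 0; i < ARRAY_SIZE(slow_works); i++) {
                INIT_WORK(&slow_works[i], slow_fn);
                queue_work(shared_wq, &slow_works[i]);
        }

        /* ... so this fast item waits on the pending list behind them. */
        queue_work(shared_wq, &fast_work);
        return 0;
}

static void __exit starve_exit(void)
{
        destroy_workqueue(shared_wq);   /* drains all queued work first */
}

module_init(starve_init);
module_exit(starve_exit);
MODULE_LICENSE("GPL");

While tcp_listen_work waits like this, nothing accepts from the TCP listen
socket, so its backlog overflows, which is what the netstat counters above
show.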
This patch creates an independent workqueue (smc_tcp_ls_wq) for
smc_tcp_listen_work(), separating it from smc_listen_work(). This is quite
acceptable considering that smc_tcp_listen_work() runs very quickly.
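For illustration only (hypothetical names; the real change is in the diff
below), the resulting pattern looks like this: the fast dispatch work gets a
dedicated workqueue, while the blocking per-connection work stays on the
shared one:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *fast_wq;        /* plays the role of smc_tcp_ls_wq */
static struct workqueue_struct *slow_wq;        /* plays the role of smc_hs_wq */

/* Fast, non-blocking dispatch, like smc_tcp_listen_work(). */
static void fast_fn(struct work_struct *work)
{
}

/* Blocking per-connection handshake, like smc_listen_work(). */
static void slow_fn(struct work_struct *work)
{
}

static DECLARE_WORK(fast_work, fast_fn);
static DECLARE_WORK(slow_work, slow_fn);

static int __init split_init(void)
{
        fast_wq = alloc_workqueue("fast_wq", 0, 0);
        if (!fast_wq)
                return -ENOMEM;

        slow_wq = alloc_workqueue("slow_wq", 0, 0);
        if (!slow_wq) {
                destroy_workqueue(fast_wq);
                return -ENOMEM;
        }

        /* The fast item can no longer queue behind blocked slow items. */
        queue_work(fast_wq, &fast_work);
        queue_work(slow_wq, &slow_work);
        return 0;
}

static void __exit split_exit(void)
{
        destroy_workqueue(slow_wq);
        destroy_workqueue(fast_wq);
}

module_init(split_init);
module_exit(split_exit);
MODULE_LICENSE("GPL");

Note that the extra workqueue must also be torn down on the error-unwind and
exit paths; the out_alloc_tcp_ls_wq label and the destroy_workqueue() call
added to smc_exit() in the diff below take care of that.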
Signed-off-by: D. Wythe <alibuda@linux.alibaba.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/smc')
-rw-r--r-- | net/smc/af_smc.c | 13
1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index 00b2e9deabb0..4969ac8029a9 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -59,6 +59,7 @@
 static DEFINE_MUTEX(smc_client_lgr_pending);    /* serialize link group
                                                  * creation on client
                                                  */
 
+static struct workqueue_struct *smc_tcp_ls_wq;  /* wq for tcp listen work */
 struct workqueue_struct *smc_hs_wq;     /* wq for handshake work */
 struct workqueue_struct *smc_close_wq;  /* wq for close work */
@@ -2227,7 +2228,7 @@ static void smc_clcsock_data_ready(struct sock *listen_clcsock)
         lsmc->clcsk_data_ready(listen_clcsock);
         if (lsmc->sk.sk_state == SMC_LISTEN) {
                 sock_hold(&lsmc->sk); /* sock_put in smc_tcp_listen_work() */
-                if (!queue_work(smc_hs_wq, &lsmc->tcp_listen_work))
+                if (!queue_work(smc_tcp_ls_wq, &lsmc->tcp_listen_work))
                         sock_put(&lsmc->sk);
         }
 }
@@ -3024,9 +3025,14 @@ static int __init smc_init(void)
                 goto out_nl;
 
         rc = -ENOMEM;
+
+        smc_tcp_ls_wq = alloc_workqueue("smc_tcp_ls_wq", 0, 0);
+        if (!smc_tcp_ls_wq)
+                goto out_pnet;
+
         smc_hs_wq = alloc_workqueue("smc_hs_wq", 0, 0);
         if (!smc_hs_wq)
-                goto out_pnet;
+                goto out_alloc_tcp_ls_wq;
 
         smc_close_wq = alloc_workqueue("smc_close_wq", 0, 0);
         if (!smc_close_wq)
@@ -3097,6 +3103,8 @@ out_alloc_wqs:
         destroy_workqueue(smc_close_wq);
 out_alloc_hs_wq:
         destroy_workqueue(smc_hs_wq);
+out_alloc_tcp_ls_wq:
+        destroy_workqueue(smc_tcp_ls_wq);
 out_pnet:
         smc_pnet_exit();
 out_nl:
@@ -3115,6 +3123,7 @@ static void __exit smc_exit(void)
         smc_core_exit();
         smc_ib_unregister_client();
         destroy_workqueue(smc_close_wq);
+        destroy_workqueue(smc_tcp_ls_wq);
         destroy_workqueue(smc_hs_wq);
         proto_unregister(&smc_proto6);
         proto_unregister(&smc_proto);