author      Trond Myklebust <trond.myklebust@primarydata.com>  2017-04-28 17:52:42 +0300
committer   Trond Myklebust <trond.myklebust@hammerspace.com>  2019-07-06 21:54:49 +0300
commit      21f0ffaff510b0530bfdf77da7133c0b99dee2fe (patch)
tree        0302c053444f3a371d7c188a33f9834fb7cbc31e /net/sunrpc/xprtmultipath.c
parent      44942b4e457beda00981f616402a1a791e8c616e (diff)
SUNRPC: Add basic load balancing to the transport switch
For now, just count the queue length. It is less accurate than counting
the number of bytes queued, but easier to implement.
Signed-off-by: Trond Myklebust <trond.myklebust@primarydata.com>
Diffstat (limited to 'net/sunrpc/xprtmultipath.c')
-rw-r--r--  net/sunrpc/xprtmultipath.c  20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/net/sunrpc/xprtmultipath.c b/net/sunrpc/xprtmultipath.c
index 8394124126f8..394e427533be 100644
--- a/net/sunrpc/xprtmultipath.c
+++ b/net/sunrpc/xprtmultipath.c
@@ -36,6 +36,7 @@ static void xprt_switch_add_xprt_locked(struct rpc_xprt_switch *xps,
 	if (xps->xps_nxprts == 0)
 		xps->xps_net = xprt->xprt_net;
 	xps->xps_nxprts++;
+	xps->xps_nactive++;
 }
 
 /**
@@ -62,6 +63,7 @@ static void xprt_switch_remove_xprt_locked(struct rpc_xprt_switch *xps,
 {
 	if (unlikely(xprt == NULL))
 		return;
+	xps->xps_nactive--;
 	xps->xps_nxprts--;
 	if (xps->xps_nxprts == 0)
 		xps->xps_net = NULL;
@@ -317,8 +319,24 @@ struct rpc_xprt *xprt_switch_find_next_entry_roundrobin(struct list_head *head,
 static
 struct rpc_xprt *xprt_iter_next_entry_roundrobin(struct rpc_xprt_iter *xpi)
 {
-	return xprt_iter_next_entry_multiple(xpi,
+	struct rpc_xprt_switch *xps = rcu_dereference(xpi->xpi_xpswitch);
+	struct rpc_xprt *xprt;
+	unsigned long xprt_queuelen;
+	unsigned long xps_queuelen;
+	unsigned long xps_avglen;
+
+	do {
+		xprt = xprt_iter_next_entry_multiple(xpi,
 			xprt_switch_find_next_entry_roundrobin);
+		if (xprt == NULL)
+			break;
+		xprt_queuelen = atomic_long_read(&xprt->queuelen);
+		if (xprt_queuelen <= 2)
+			break;
+		xps_queuelen = atomic_long_read(&xps->xps_queuelen);
+		xps_avglen = DIV_ROUND_UP(xps_queuelen, xps->xps_nactive);
+	} while (xprt_queuelen > xps_avglen);
+	return xprt;
 }
 
 static
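In plain terms, the new loop keeps round-robining past any transport whose
queue is longer than the per-transport average, DIV_ROUND_UP(xps_queuelen,
xps_nactive), and accepts a transport immediately when its queue holds two
requests or fewer. Below is a minimal user-space sketch of that acceptance
test; the struct xprt/struct xprt_switch types, the div_round_up() helper,
and xprt_acceptable() are simplified stand-ins invented for illustration,
not the kernel's definitions.

/*
 * Sketch of the queue-length heuristic from the commit above, using
 * hypothetical stand-in types rather than the kernel's rpc_xprt structs.
 */
#include <stdio.h>

struct xprt {
	unsigned long queuelen;	/* requests queued on this transport */
};

struct xprt_switch {
	unsigned long queuelen;	/* total requests queued across the switch */
	unsigned long nactive;	/* number of active transports */
};

/* Equivalent of the kernel's DIV_ROUND_UP() macro. */
static unsigned long div_round_up(unsigned long n, unsigned long d)
{
	return (n + d - 1) / d;
}

/*
 * Mirror of the loop's exit conditions: accept a transport if its queue
 * is short (<= 2 requests) or no longer than the per-transport average.
 */
static int xprt_acceptable(const struct xprt_switch *xps,
			   const struct xprt *xprt)
{
	unsigned long avglen;

	if (xprt->queuelen <= 2)
		return 1;
	avglen = div_round_up(xps->queuelen, xps->nactive);
	return xprt->queuelen <= avglen;
}

int main(void)
{
	/* 3 active transports, 10 queued requests: avg = ceil(10/3) = 4. */
	struct xprt_switch xps = { .queuelen = 10, .nactive = 3 };
	struct xprt busy = { .queuelen = 5 };	/* above average: skipped */
	struct xprt idle = { .queuelen = 3 };	/* below average: accepted */

	printf("busy accepted? %d\n", xprt_acceptable(&xps, &busy));	/* 0 */
	printf("idle accepted? %d\n", xprt_acceptable(&xps, &idle));	/* 1 */
	return 0;
}

Because the average is rounded up, at least one transport in the switch must
sit at or below it, so the round-robin walk should always reach an acceptable
candidate (or exhaust the list and return NULL), even though the atomically
read counters can drift under concurrent traffic.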