diff options
author | Mark Wunderlich <mark.wunderlich@intel.com> | 2020-08-28 04:00:53 +0300 |
---|---|---|
committer | Christoph Hellwig <hch@lst.de> | 2020-09-27 10:14:18 +0300 |
commit | f7790e5de92a3b88e74704a6cb2276c09c32f511 (patch) | |
tree | 809af3624b821a464e755f9a30bda1db4d795c5d /drivers/nvme/target/tcp.c | |
parent | 0b85f59d30b91bd2b93ea7ef0816a4b7e7039e8c (diff) | |
download | linux-f7790e5de92a3b88e74704a6cb2276c09c32f511.tar.xz |
nvmet-tcp: have queue io_work context run on sock incoming cpu
There is no real need to spread queues artificially. Usually the
target will serve multiple hosts, and it's better to run on the socket's
incoming cpu for better affinitization rather than spreading queues across all
online cpus.
We rely on RSS to spread the work around sufficiently.
Signed-off-by: Mark Wunderlich <mark.wunderlich@intel.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Diffstat (limited to 'drivers/nvme/target/tcp.c')
-rw-r--r-- | drivers/nvme/target/tcp.c | 21 |
1 file changed, 10 insertions, 11 deletions
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 8e0d766d2722..dc1f0f647189 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -94,7 +94,6 @@ struct nvmet_tcp_queue { struct socket *sock; struct nvmet_tcp_port *port; struct work_struct io_work; - int cpu; struct nvmet_cq nvme_cq; struct nvmet_sq nvme_sq; @@ -144,7 +143,6 @@ struct nvmet_tcp_port { struct work_struct accept_work; struct nvmet_port *nport; struct sockaddr_storage addr; - int last_cpu; void (*data_ready)(struct sock *); }; @@ -219,6 +217,11 @@ static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd) list_add_tail(&cmd->entry, &cmd->queue->free_list); } +static inline int queue_cpu(struct nvmet_tcp_queue *queue) +{ + return queue->sock->sk->sk_incoming_cpu; +} + static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue) { return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0; @@ -506,7 +509,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req) struct nvmet_tcp_queue *queue = cmd->queue; llist_add(&cmd->lentry, &queue->resp_list); - queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work); + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work); } static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd) @@ -1223,7 +1226,7 @@ static void nvmet_tcp_io_work(struct work_struct *w) * We exahusted our budget, requeue our selves */ if (pending) - queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); } static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue, @@ -1383,7 +1386,7 @@ static void nvmet_tcp_data_ready(struct sock *sk) read_lock_bh(&sk->sk_callback_lock); queue = sk->sk_user_data; if (likely(queue)) - queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); read_unlock_bh(&sk->sk_callback_lock); } @@ -1403,7 +1406,7 @@ static void nvmet_tcp_write_space(struct 
sock *sk) if (sk_stream_is_writeable(sk)) { clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags); - queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); } out: read_unlock_bh(&sk->sk_callback_lock); @@ -1512,9 +1515,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_free_connect; - port->last_cpu = cpumask_next_wrap(port->last_cpu, - cpu_online_mask, -1, false); - queue->cpu = port->last_cpu; nvmet_prepare_receive_pdu(queue); mutex_lock(&nvmet_tcp_queue_mutex); @@ -1525,7 +1525,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, if (ret) goto out_destroy_sq; - queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work); + queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work); return 0; out_destroy_sq: @@ -1612,7 +1612,6 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport) } port->nport = nport; - port->last_cpu = -1; INIT_WORK(&port->accept_work, nvmet_tcp_accept_work); if (port->nport->inline_data_size < 0) port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE; |