author		Hannes Reinecke <hare@suse.de>	2023-08-24 17:39:21 +0300
committer	Keith Busch <kbusch@kernel.org>	2023-10-11 20:29:59 +0300
commit		79a4f186ad37423996e7835e9d3d6055ff1a181d (patch)
tree		5e6acc66c916edc0cb74cba2f1037b09b5e635d3 /drivers/nvme/target
parent		4f8cce2d32a3011bff8c8a4bbc69bca5a7bdd030 (diff)
download	linux-79a4f186ad37423996e7835e9d3d6055ff1a181d.tar.xz
nvmet-tcp: allocate socket file
For the TLS upcall we need to allocate a socket file such that the
userspace daemon is able to use the socket.

Signed-off-by: Hannes Reinecke <hare@suse.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Keith Busch <kbusch@kernel.org>
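The core of the change is an ownership hand-off: sock_alloc_file() attaches a
struct file to the freshly accepted socket so the userspace TLS handshake
daemon can refer to it by file descriptor, and from that point on the file
owns the socket, so teardown must go through fput() instead of
sock_release(). A minimal, self-contained sketch of that pattern (not the
nvmet code itself; the function name is made up for illustration):

/* Sketch of the socket-file ownership pattern this patch introduces. */
#include <linux/err.h>
#include <linux/fcntl.h>
#include <linux/file.h>
#include <linux/net.h>

static int example_attach_sock_file(struct socket *newsock)
{
	struct file *sock_file;

	/* Attach a struct file so userspace can address the socket by fd. */
	sock_file = sock_alloc_file(newsock, O_CLOEXEC, NULL);
	if (IS_ERR(sock_file))
		/* On failure sock_alloc_file() has already released newsock. */
		return PTR_ERR(sock_file);

	/*
	 * The file now pins the socket: dropping the last reference with
	 * fput() releases both, so sock_release() must not be called again.
	 */
	fput(sock_file);
	return 0;
}

This is also why the error path in the diff below only falls back to
sock_release(newsock) when no file was attached; once the file exists,
fput() is the only correct way to drop the socket.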
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--	drivers/nvme/target/tcp.c	18
1 file changed, 14 insertions, 4 deletions
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 0dd90770a2e1..e72786b70f68 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -1493,12 +1493,12 @@ static void nvmet_tcp_release_queue_work(struct work_struct *w)
 	nvmet_sq_destroy(&queue->nvme_sq);
 	cancel_work_sync(&queue->io_work);
 	nvmet_tcp_free_cmd_data_in_buffers(queue);
-	sock_release(queue->sock);
+	/* ->sock will be released by fput() */
+	fput(queue->sock->file);
 	nvmet_tcp_free_cmds(queue);
 	if (queue->hdr_digest || queue->data_digest)
 		nvmet_tcp_free_crypto(queue);
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
-
 	page = virt_to_head_page(queue->pf_cache.va);
 	__page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias);
 	kfree(queue);
@@ -1625,6 +1625,7 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 		struct socket *newsock)
 {
 	struct nvmet_tcp_queue *queue;
+	struct file *sock_file = NULL;
 	int ret;
 
 	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
@@ -1644,10 +1645,16 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
 	init_llist_head(&queue->resp_list);
 	INIT_LIST_HEAD(&queue->resp_send_list);
 
+	sock_file = sock_alloc_file(queue->sock, O_CLOEXEC, NULL);
+	if (IS_ERR(sock_file)) {
+		ret = PTR_ERR(sock_file);
+		goto out_free_queue;
+	}
+
 	queue->idx = ida_alloc(&nvmet_tcp_queue_ida, GFP_KERNEL);
 	if (queue->idx < 0) {
 		ret = queue->idx;
-		goto out_free_queue;
+		goto out_sock;
 	}
 
 	ret = nvmet_tcp_alloc_cmd(queue, &queue->connect);
@@ -1678,11 +1685,14 @@ out_free_connect:
 	nvmet_tcp_free_cmd(&queue->connect);
 out_ida_remove:
 	ida_free(&nvmet_tcp_queue_ida, queue->idx);
+out_sock:
+	fput(queue->sock->file);
 out_free_queue:
 	kfree(queue);
 out_release:
 	pr_err("failed to allocate queue, error %d\n", ret);
-	sock_release(newsock);
+	if (!sock_file)
+		sock_release(newsock);
 }
 
 static void nvmet_tcp_accept_work(struct work_struct *w)