author	Sagi Grimberg <sagi@grimberg.me>	2023-10-02 13:54:28 +0300
committer	Keith Busch <kbusch@kernel.org>	2023-10-10 18:03:22 +0300
commit	d920abd1e7c4884f9ecd0749d1921b7ab19ddfbd (patch)
tree	5404c082d6b0e59db8cb20be6f2b5bbee4cd3836 /drivers/nvme/target
parent	3820c4fdc247b6f0a4162733bdb8ddf8f2e8a1e4 (diff)
download	linux-d920abd1e7c4884f9ecd0749d1921b7ab19ddfbd.tar.xz
nvmet-tcp: Fix a possible UAF in queue initialization setup
From Alon: "Due to a logical bug in the NVMe-oF/TCP subsystem in the Linux kernel, a malicious user can cause a UAF and a double free, which may lead to RCE (may also lead to an LPE in case the attacker already has local privileges)."

Hence, when queue initialization fails after the ahash requests have been allocated, the queue removal async work is guaranteed to run, so leave the deallocation to the queue removal path.

Also, be extra careful not to continue processing the socket: set the queue rcv_state to NVMET_TCP_RECV_ERR upon a socket error.

Cc: stable@vger.kernel.org
Reported-by: Alon Zahavi <zahavi.alon@gmail.com>
Tested-by: Alon Zahavi <zahavi.alon@gmail.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
Signed-off-by: Keith Busch <kbusch@kernel.org>
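To make the deferred-cleanup reasoning concrete, here is a minimal userspace sketch of the single-owner cleanup rule the patch enforces. It is not the kernel code: the names fake_queue, fake_init, and fake_release are invented, and fake_init only models an initialization path that fails after the resource (standing in for the ahash requests) was allocated while a later release path is still guaranteed to run.

/*
 * Minimal sketch of the single-owner cleanup pattern, not the kernel code.
 * fake_init() models an icreq handler failing after allocation;
 * fake_release() models the queue-removal work that runs afterwards.
 */
#include <stdio.h>
#include <stdlib.h>

struct fake_queue {
	void *crypto;			/* stands in for the ahash requests */
};

static int fake_init(struct fake_queue *q, int simulate_send_error)
{
	q->crypto = malloc(64);
	if (!q->crypto)
		return -1;

	if (simulate_send_error) {
		/*
		 * The buggy pattern would free(q->crypto) here and still let
		 * fake_release() run later -> double free / use-after-free.
		 * The fixed pattern just returns the error and leaves the
		 * cleanup to the single release path.
		 */
		return -1;
	}
	return 0;
}

static void fake_release(struct fake_queue *q)
{
	/* single owner of the cleanup, mirrors the queue-removal work */
	free(q->crypto);
	q->crypto = NULL;
}

int main(void)
{
	struct fake_queue q = { 0 };

	if (fake_init(&q, 1))
		printf("init failed, cleanup deferred to release\n");
	fake_release(&q);		/* frees exactly once */
	return 0;
}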
Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--	drivers/nvme/target/tcp.c	7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index cd92d7ddf5ed..197fc2ecb164 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -372,6 +372,7 @@ static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
 
 static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
 {
+	queue->rcv_state = NVMET_TCP_RECV_ERR;
 	if (status == -EPIPE || status == -ECONNRESET)
 		kernel_sock_shutdown(queue->sock, SHUT_RDWR);
 	else
@@ -910,15 +911,11 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
 	iov.iov_len = sizeof(*icresp);
 	ret = kernel_sendmsg(queue->sock, &msg, &iov, 1, iov.iov_len);
 	if (ret < 0)
-		goto free_crypto;
+		return ret; /* queue removal will cleanup */
 
 	queue->state = NVMET_TCP_Q_LIVE;
 	nvmet_prepare_receive_pdu(queue);
 	return 0;
-free_crypto:
-	if (queue->hdr_digest || queue->data_digest)
-		nvmet_tcp_free_crypto(queue);
-	return ret;
 }
 
 static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
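The rcv_state assignment works for a simple reason: the receive path dispatches on queue->rcv_state, so once the queue is marked NVMET_TCP_RECV_ERR nothing further is parsed from the socket. The sketch below is an illustrative standalone model of that guard, not the actual tcp.c state machine; the enum values and function names are invented.

/*
 * Standalone model (assumption, not tcp.c) of a receive loop that stops
 * all processing once the queue has been flagged as errored.
 */
#include <stdio.h>

enum recv_state {
	RECV_PDU,
	RECV_DATA,
	RECV_DDGST,
	RECV_ERR,
};

struct model_queue {
	enum recv_state rcv_state;
};

static int try_recv_one(struct model_queue *q)
{
	switch (q->rcv_state) {
	case RECV_PDU:
	case RECV_DATA:
	case RECV_DDGST:
		/* normal PDU/data/digest processing would happen here */
		return 1;
	case RECV_ERR:
	default:
		return 0;	/* errored queue: do no further work */
	}
}

int main(void)
{
	struct model_queue q = { .rcv_state = RECV_ERR };

	if (!try_recv_one(&q))
		printf("queue flagged as errored, receive loop stops\n");
	return 0;
}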