summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaurizio Lombardi <mlombard@redhat.com>2026-03-16 17:39:35 +0300
committerKeith Busch <kbusch@kernel.org>2026-04-08 22:05:00 +0300
commitea8e356acb165cb1fd75537a52e1f66e5e76c538 (patch)
treec7ef7bd2d4e142d3c30ab194698ef0d01de3e030
parent23528aa3320a74b028e990b5a939fed32a8afc2f (diff)
downloadlinux-ea8e356acb165cb1fd75537a52e1f66e5e76c538.tar.xz
nvmet-tcp: propagate nvmet_tcp_build_pdu_iovec() errors to its callers
Currently, when nvmet_tcp_build_pdu_iovec() detects an out-of-bounds PDU length or offset, it triggers nvmet_tcp_fatal_error(cmd->queue) and returns early. However, because the function returns void, the callers are entirely unaware that a fatal error has occurred and that the cmd->recv_msg.msg_iter was left uninitialized. Callers such as nvmet_tcp_handle_h2c_data_pdu() proceed to blindly overwrite the queue state with queue->rcv_state = NVMET_TCP_RECV_DATA Consequently, the socket receiving loop may attempt to read incoming network data into the uninitialized iterator. Fix this by shifting the error handling responsibility to the callers. Fixes: 52a0a9854934 ("nvmet-tcp: add bounds checks in nvmet_tcp_build_pdu_iovec") Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: Yunje Shin <ioerts@kookmin.ac.kr> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com> Signed-off-by: Maurizio Lombardi <mlombard@redhat.com> Signed-off-by: Keith Busch <kbusch@kernel.org>
-rw-r--r--drivers/nvme/target/tcp.c51
1 file changed, 29 insertions, 22 deletions
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 69e971b179ae..3ade734c8bcf 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -351,7 +351,7 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
-static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
+static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct bio_vec *iov = cmd->iov;
struct scatterlist *sg;
@@ -364,22 +364,19 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
offset = cmd->rbytes_done;
cmd->sg_idx = offset / PAGE_SIZE;
sg_offset = offset % PAGE_SIZE;
- if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
+ if (!cmd->req.sg_cnt || cmd->sg_idx >= cmd->req.sg_cnt)
+ return -EPROTO;
+
sg = &cmd->req.sg[cmd->sg_idx];
sg_remaining = cmd->req.sg_cnt - cmd->sg_idx;
while (length) {
- if (!sg_remaining) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
- if (!sg->length || sg->length <= sg_offset) {
- nvmet_tcp_fatal_error(cmd->queue);
- return;
- }
+ if (!sg_remaining)
+ return -EPROTO;
+
+ if (!sg->length || sg->length <= sg_offset)
+ return -EPROTO;
+
u32 iov_len = min_t(u32, length, sg->length - sg_offset);
bvec_set_page(iov, sg_page(sg), iov_len,
@@ -394,6 +391,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
iov_iter_bvec(&cmd->recv_msg.msg_iter, ITER_DEST, cmd->iov,
nr_pages, cmd->pdu_len);
+ return 0;
}
static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
@@ -931,7 +929,7 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
return 0;
}
-static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
+static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
struct nvmet_tcp_cmd *cmd, struct nvmet_req *req)
{
size_t data_len = le32_to_cpu(req->cmd->common.dptr.sgl.length);
@@ -947,19 +945,23 @@ static void nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
if (!nvme_is_write(cmd->req.cmd) || !data_len ||
data_len > cmd->req.port->inline_data_size) {
nvmet_prepare_receive_pdu(queue);
- return;
+ return 0;
}
ret = nvmet_tcp_map_data(cmd);
if (unlikely(ret)) {
pr_err("queue %d: failed to map data\n", queue->idx);
nvmet_tcp_fatal_error(queue);
- return;
+ return -EPROTO;
}
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_build_pdu_iovec(cmd);
cmd->flags |= NVMET_TCP_F_INIT_FAILED;
+ ret = nvmet_tcp_build_pdu_iovec(cmd);
+ if (unlikely(ret))
+ pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
+
+ return ret;
}
static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
@@ -1011,7 +1013,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
goto err_proto;
}
cmd->pdu_recv = 0;
- nvmet_tcp_build_pdu_iovec(cmd);
+ if (unlikely(nvmet_tcp_build_pdu_iovec(cmd))) {
+ pr_err("queue %d: failed to build PDU iovec\n", queue->idx);
+ goto err_proto;
+ }
queue->cmd = cmd;
queue->rcv_state = NVMET_TCP_RECV_DATA;
@@ -1074,8 +1079,7 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
le32_to_cpu(req->cmd->common.dptr.sgl.length),
le16_to_cpu(req->cqe->status));
- nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
- return 0;
+ return nvmet_tcp_handle_req_failure(queue, queue->cmd, req);
}
ret = nvmet_tcp_map_data(queue->cmd);
@@ -1092,8 +1096,11 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (nvmet_tcp_need_data_in(queue->cmd)) {
if (nvmet_tcp_has_inline_data(queue->cmd)) {
queue->rcv_state = NVMET_TCP_RECV_DATA;
- nvmet_tcp_build_pdu_iovec(queue->cmd);
- return 0;
+ ret = nvmet_tcp_build_pdu_iovec(queue->cmd);
+ if (unlikely(ret))
+ pr_err("queue %d: failed to build PDU iovec\n",
+ queue->idx);
+ return ret;
}
/* send back R2T */
nvmet_tcp_queue_response(&queue->cmd->req);