summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMaurizio Lombardi <mlombard@redhat.com>2026-03-16 17:39:36 +0300
committerKeith Busch <kbusch@kernel.org>2026-04-08 22:05:06 +0300
commitbad44c9c312f07b590ad7be892a95693baba976e (patch)
tree71ca98aa9331b3ee41de84a1062320eb7fe60e2f
parentea8e356acb165cb1fd75537a52e1f66e5e76c538 (diff)
downloadlinux-bad44c9c312f07b590ad7be892a95693baba976e.tar.xz
nvmet-tcp: remove redundant calls to nvmet_tcp_fatal_error()
Executing nvmet_tcp_fatal_error() is generally the responsibility of the caller (nvmet_tcp_try_recv); all other functions should just return the error code.

Remove the nvmet_tcp_fatal_error() function, as it's no longer needed.

Reviewed-by: Hannes Reinecke <hare@suse.de> Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com> Signed-off-by: Maurizio Lombardi <mlombard@redhat.com> Signed-off-by: Keith Busch <kbusch@kernel.org>
-rw-r--r--drivers/nvme/target/tcp.c37
1 file changed, 7 insertions, 30 deletions
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 3ade734c8bcf..a4c3c62e33f5 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -349,8 +349,6 @@ static void nvmet_tcp_free_cmd_buffers(struct nvmet_tcp_cmd *cmd)
cmd->req.sg = NULL;
}
-static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue);
-
static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
{
struct bio_vec *iov = cmd->iov;
@@ -394,22 +392,13 @@ static int nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd)
return 0;
}
-static void nvmet_tcp_fatal_error(struct nvmet_tcp_queue *queue)
-{
- queue->rcv_state = NVMET_TCP_RECV_ERR;
- if (queue->nvme_sq.ctrl)
- nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
- else
- kernel_sock_shutdown(queue->sock, SHUT_RDWR);
-}
-
static void nvmet_tcp_socket_error(struct nvmet_tcp_queue *queue, int status)
{
queue->rcv_state = NVMET_TCP_RECV_ERR;
- if (status == -EPIPE || status == -ECONNRESET)
+ if (status == -EPIPE || status == -ECONNRESET || !queue->nvme_sq.ctrl)
kernel_sock_shutdown(queue->sock, SHUT_RDWR);
else
- nvmet_tcp_fatal_error(queue);
+ nvmet_ctrl_fatal_error(queue->nvme_sq.ctrl);
}
static int nvmet_tcp_map_data(struct nvmet_tcp_cmd *cmd)
@@ -885,7 +874,6 @@ static int nvmet_tcp_handle_icreq(struct nvmet_tcp_queue *queue)
if (le32_to_cpu(icreq->hdr.plen) != sizeof(struct nvme_tcp_icreq_pdu)) {
pr_err("bad nvme-tcp pdu length (%d)\n",
le32_to_cpu(icreq->hdr.plen));
- nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
@@ -951,7 +939,6 @@ static int nvmet_tcp_handle_req_failure(struct nvmet_tcp_queue *queue,
ret = nvmet_tcp_map_data(cmd);
if (unlikely(ret)) {
pr_err("queue %d: failed to map data\n", queue->idx);
- nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
@@ -1024,7 +1011,6 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
err_proto:
/* FIXME: use proper transport errors */
- nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
@@ -1039,7 +1025,6 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (hdr->type != nvme_tcp_icreq) {
pr_err("unexpected pdu type (%d) before icreq\n",
hdr->type);
- nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
return nvmet_tcp_handle_icreq(queue);
@@ -1048,7 +1033,6 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (unlikely(hdr->type == nvme_tcp_icreq)) {
pr_err("queue %d: received icreq pdu in state %d\n",
queue->idx, queue->state);
- nvmet_tcp_fatal_error(queue);
return -EPROTO;
}
@@ -1065,7 +1049,6 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
pr_err("queue %d: out of commands (%d) send_list_len: %d, opcode: %d",
queue->idx, queue->nr_cmds, queue->send_list_len,
nvme_cmd->common.opcode);
- nvmet_tcp_fatal_error(queue);
return -ENOMEM;
}
@@ -1086,9 +1069,9 @@ static int nvmet_tcp_done_recv_pdu(struct nvmet_tcp_queue *queue)
if (unlikely(ret)) {
pr_err("queue %d: failed to map data\n", queue->idx);
if (nvmet_tcp_has_inline_data(queue->cmd))
- nvmet_tcp_fatal_error(queue);
- else
- nvmet_req_complete(req, ret);
+ return -EPROTO;
+
+ nvmet_req_complete(req, ret);
ret = -EAGAIN;
goto out;
}
@@ -1211,7 +1194,6 @@ recv:
if (unlikely(!nvmet_tcp_pdu_valid(hdr->type))) {
pr_err("unexpected pdu type %d\n", hdr->type);
- nvmet_tcp_fatal_error(queue);
return -EIO;
}
@@ -1225,16 +1207,12 @@ recv:
}
if (queue->hdr_digest &&
- nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen)) {
- nvmet_tcp_fatal_error(queue); /* fatal */
+ nvmet_tcp_verify_hdgst(queue, &queue->pdu, hdr->hlen))
return -EPROTO;
- }
if (queue->data_digest &&
- nvmet_tcp_check_ddgst(queue, &queue->pdu)) {
- nvmet_tcp_fatal_error(queue); /* fatal */
+ nvmet_tcp_check_ddgst(queue, &queue->pdu))
return -EPROTO;
- }
return nvmet_tcp_done_recv_pdu(queue);
}
@@ -1320,7 +1298,6 @@ static int nvmet_tcp_try_recv_ddgst(struct nvmet_tcp_queue *queue)
if (!(cmd->flags & NVMET_TCP_F_INIT_FAILED))
nvmet_req_uninit(&cmd->req);
nvmet_tcp_free_cmd_buffers(cmd);
- nvmet_tcp_fatal_error(queue);
ret = -EPROTO;
goto out;
}