Diffstat (limited to 'drivers/nvme/target')
-rw-r--r--   drivers/nvme/target/admin-cmd.c   |  2
-rw-r--r--   drivers/nvme/target/configfs.c    |  1
-rw-r--r--   drivers/nvme/target/core.c        | 12
-rw-r--r--   drivers/nvme/target/fc.c          |  6
-rw-r--r--   drivers/nvme/target/fcloop.c      |  2
-rw-r--r--   drivers/nvme/target/io-cmd-bdev.c |  1
-rw-r--r--   drivers/nvme/target/loop.c        |  6
-rw-r--r--   drivers/nvme/target/nvmet.h       |  2
-rw-r--r--   drivers/nvme/target/passthru.c    | 70
-rw-r--r--   drivers/nvme/target/rdma.c        |  4
-rw-r--r--   drivers/nvme/target/tcp.c         | 31
11 files changed, 93 insertions(+), 44 deletions(-)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index e9fe91786bbb..dca34489a1dc 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -727,7 +727,9 @@ u16 nvmet_set_feat_kato(struct nvmet_req *req)
 {
        u32 val32 = le32_to_cpu(req->cmd->common.cdw11);
 
+       nvmet_stop_keep_alive_timer(req->sq->ctrl);
        req->sq->ctrl->kato = DIV_ROUND_UP(val32, 1000);
+       nvmet_start_keep_alive_timer(req->sq->ctrl);
 
        nvmet_set_result(req, req->sq->ctrl->kato);
 
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 74b2b61c773b..37e1d7784e17 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -1136,6 +1136,7 @@ static ssize_t nvmet_subsys_attr_model_store(struct config_item *item,
        up_write(&nvmet_config_sem);
 
        kfree_rcu(new_model, rcuhead);
+       kfree(new_model_number);
 
        return count;
 }
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b92f45f5cd5b..25d62d867563 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -73,7 +73,7 @@ inline u16 errno_to_nvme_status(struct nvmet_req *req, int errno)
                status = NVME_SC_ACCESS_DENIED;
                break;
        case -EIO:
-               /* FALLTHRU */
+               fallthrough;
        default:
                req->error_loc = offsetof(struct nvme_common_command, opcode);
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
@@ -395,8 +395,11 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
        nvmet_ctrl_fatal_error(ctrl);
 }
 
-static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+       if (unlikely(ctrl->kato == 0))
+               return;
+
        pr_debug("ctrl %d start keep-alive timer for %d secs\n",
                ctrl->cntlid, ctrl->kato);
 
@@ -404,8 +407,11 @@ static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
        schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
 }
 
-static void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl)
 {
+       if (unlikely(ctrl->kato == 0))
+               return;
+
        pr_debug("ctrl %d stop keep-alive\n", ctrl->cntlid);
 
        cancel_delayed_work_sync(&ctrl->ka_work);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 55bafd56166a..cd4e73aa9807 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1019,7 +1019,7 @@ static void
 nvmet_fc_free_hostport(struct nvmet_fc_hostport *hostport)
 {
        /* if LLDD not implemented, leave as NULL */
-       if (!hostport->hosthandle)
+       if (!hostport || !hostport->hosthandle)
                return;
 
        nvmet_fc_hostport_put(hostport);
@@ -2342,9 +2342,9 @@ nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
                        return;
                if (fcpreq->fcp_error ||
                    fcpreq->transferred_length != fcpreq->transfer_length) {
-                       spin_lock(&fod->flock);
+                       spin_lock_irqsave(&fod->flock, flags);
                        fod->abort = true;
-                       spin_unlock(&fod->flock);
+                       spin_unlock_irqrestore(&fod->flock, flags);
 
                        nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
                        return;
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index c97e60b71bbc..3da067a8311e 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -812,7 +812,7 @@ fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
                        break;
 
                /* Fall-Thru to RSP handling */
-               /* FALLTHRU */
+               fallthrough;
 
        case NVMET_FCOP_RSP:
                if (fcpreq) {
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
index 3dd6f566a240..125dde3f410e 100644
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -139,7 +139,6 @@ static u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
                req->error_loc = offsetof(struct nvme_rw_command, nsid);
                break;
        case BLK_STS_IOERR:
-               /* fallthru */
        default:
                status = NVME_SC_INTERNAL | NVME_SC_DNR;
                req->error_loc = offsetof(struct nvme_common_command, opcode);
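The admin-cmd.c and core.c hunks above cooperate: changing KATO via Set Features now stops the keep-alive timer before updating ctrl->kato and re-arms it afterwards, so the new interval takes effect immediately rather than at the next expiry, and both helpers bail out when kato == 0 (keep-alive disabled). The helpers also lose their static qualifier so admin-cmd.c can call them; the declarations land in nvmet.h further down. A minimal sketch of that stop/update/re-arm pattern on a delayed work item — my_ctrl and my_set_period are hypothetical names, not from this patch:

    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_ctrl {
            struct delayed_work     ka_work;
            unsigned int            kato;   /* seconds, 0 = disabled */
    };

    static void my_set_period(struct my_ctrl *ctrl, unsigned int secs)
    {
            /* Cancel first so an in-flight expiry can't race the update. */
            if (ctrl->kato)
                    cancel_delayed_work_sync(&ctrl->ka_work);

            ctrl->kato = secs;

            /* Re-arm with the new period, unless disabled. */
            if (ctrl->kato)
                    schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
    }

Note that cancel_delayed_work_sync() can sleep, so this pattern requires process context — which a Set Features handler has.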
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index 4884ef1e46a2..f6d81239be21 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -115,7 +115,7 @@ static void nvme_loop_queue_response(struct nvmet_req *req)
                        return;
                }
 
-               if (!nvme_end_request(rq, cqe->status, cqe->result))
+               if (!nvme_try_complete_req(rq, cqe->status, cqe->result))
                        nvme_loop_complete_rq(rq);
        }
 }
@@ -579,7 +579,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev,
        ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_loop_ctrl_ops,
                                0 /* no quirks, we're perfect! */);
        if (ret)
-               goto out_put_ctrl;
+               goto out;
 
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
                WARN_ON_ONCE(1);
@@ -635,8 +635,8 @@ out_free_queues:
        kfree(ctrl->queues);
 out_uninit_ctrl:
        nvme_uninit_ctrl(&ctrl->ctrl);
-out_put_ctrl:
        nvme_put_ctrl(&ctrl->ctrl);
+out:
        if (ret > 0)
                ret = -EIO;
        return ERR_PTR(ret);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 47ee3fb193bd..559a15ccc322 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -395,6 +395,8 @@ void nvmet_get_feat_async_event(struct nvmet_req *req);
 u16 nvmet_set_feat_kato(struct nvmet_req *req);
 u16 nvmet_set_feat_async_event(struct nvmet_req *req, u32 mask);
 void nvmet_execute_async_event(struct nvmet_req *req);
+void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl);
+void nvmet_stop_keep_alive_timer(struct nvmet_ctrl *ctrl);
 u16 nvmet_parse_connect_cmd(struct nvmet_req *req);
 void nvmet_bdev_set_limits(struct block_device *bdev, struct nvme_id_ns *id);
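The first loop.c hunk is a mechanical rename (nvme_end_request() became nvme_try_complete_req()); the other two fix error-label ordering in nvme_loop_create_ctrl(): a failure before nvme_init_ctrl() succeeds must skip nvme_uninit_ctrl() entirely, while afterwards the final reference is dropped through the uninit path. This is the usual kernel idiom of unwinding in reverse order of acquisition; a hedged sketch, where my_setup, acquire_a/release_a and friends are hypothetical stand-ins:

    static int my_setup(struct my_ctx *c)
    {
            int ret;

            ret = acquire_a(c);             /* hypothetical step 1 */
            if (ret)
                    goto out;               /* nothing to undo yet */

            ret = acquire_b(c);             /* hypothetical step 2 */
            if (ret)
                    goto out_release_a;     /* undo step 1 only */

            return 0;

    out_release_a:
            release_a(c);
    out:
            return ret;
    }

Each label undoes exactly the steps that completed before the jump, so adding a setup step means adding one label rather than auditing every error path.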
diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c
index 89d91dc999a6..56c571052216 100644
--- a/drivers/nvme/target/passthru.c
+++ b/drivers/nvme/target/passthru.c
@@ -165,7 +165,7 @@ static void nvmet_passthru_execute_cmd_work(struct work_struct *w)
 
        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, status);
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
 }
 
 static void nvmet_passthru_req_done(struct request *rq,
@@ -175,7 +175,7 @@ static void nvmet_passthru_req_done(struct request *rq,
 
        req->cqe->result = nvme_req(rq)->result;
        nvmet_req_complete(req, nvme_req(rq)->status);
-       blk_put_request(rq);
+       blk_mq_free_request(rq);
 }
 
 static int nvmet_passthru_map_sg(struct nvmet_req *req, struct request *rq)
@@ -230,7 +230,7 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
                if (unlikely(!ns)) {
                        pr_err("failed to get passthru ns nsid:%u\n", nsid);
                        status = NVME_SC_INVALID_NS | NVME_SC_DNR;
-                       goto fail_out;
+                       goto out;
                }
 
                q = ns->queue;
@@ -238,16 +238,15 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 
        rq = nvme_alloc_request(q, req->cmd, BLK_MQ_REQ_NOWAIT, NVME_QID_ANY);
        if (IS_ERR(rq)) {
-               rq = NULL;
                status = NVME_SC_INTERNAL;
-               goto fail_out;
+               goto out_put_ns;
        }
 
        if (req->sg_cnt) {
                ret = nvmet_passthru_map_sg(req, rq);
                if (unlikely(ret)) {
                        status = NVME_SC_INTERNAL;
-                       goto fail_out;
+                       goto out_put_req;
                }
        }
 
@@ -274,11 +273,13 @@ static void nvmet_passthru_execute_cmd(struct nvmet_req *req)
 
        return;
 
-fail_out:
+out_put_req:
+       blk_mq_free_request(rq);
+out_put_ns:
        if (ns)
                nvme_put_ns(ns);
+out:
        nvmet_req_complete(req, status);
-       blk_put_request(rq);
 }
 
 /*
@@ -326,6 +327,10 @@ static u16 nvmet_setup_passthru_command(struct nvmet_req *req)
 
 u16 nvmet_parse_passthru_io_cmd(struct nvmet_req *req)
 {
+       /* Reject any commands with non-sgl flags set (ie. fused commands) */
+       if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+               return NVME_SC_INVALID_FIELD;
+
        switch (req->cmd->common.opcode) {
        case nvme_cmd_resv_register:
        case nvme_cmd_resv_report:
@@ -396,6 +401,10 @@ static u16 nvmet_passthru_get_set_features(struct nvmet_req *req)
 
 u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 {
+       /* Reject any commands with non-sgl flags set (ie. fused commands) */
+       if (req->cmd->common.flags & ~NVME_CMD_SGL_ALL)
+               return NVME_SC_INVALID_FIELD;
+
        /*
         * Passthru all vendor specific commands
         */
@@ -447,10 +456,26 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
                        req->execute = nvmet_passthru_execute_cmd;
                        req->p.use_workqueue = true;
                        return NVME_SC_SUCCESS;
+               case NVME_ID_CNS_CS_CTRL:
+                       switch (req->cmd->identify.csi) {
+                       case NVME_CSI_ZNS:
+                               req->execute = nvmet_passthru_execute_cmd;
+                               req->p.use_workqueue = true;
+                               return NVME_SC_SUCCESS;
+                       }
+                       return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                case NVME_ID_CNS_NS:
                        req->execute = nvmet_passthru_execute_cmd;
                        req->p.use_workqueue = true;
                        return NVME_SC_SUCCESS;
+               case NVME_ID_CNS_CS_NS:
+                       switch (req->cmd->identify.csi) {
+                       case NVME_CSI_ZNS:
+                               req->execute = nvmet_passthru_execute_cmd;
+                               req->p.use_workqueue = true;
+                               return NVME_SC_SUCCESS;
+                       }
+                       return NVME_SC_INVALID_OPCODE | NVME_SC_DNR;
                default:
                        return nvmet_setup_passthru_command(req);
                }
@@ -465,6 +490,7 @@ u16 nvmet_parse_passthru_admin_cmd(struct nvmet_req *req)
 int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
 {
        struct nvme_ctrl *ctrl;
+       struct file *file;
        int ret = -EINVAL;
        void *old;
 
@@ -479,24 +505,29 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
                goto out_unlock;
        }
 
-       ctrl = nvme_ctrl_get_by_path(subsys->passthru_ctrl_path);
-       if (IS_ERR(ctrl)) {
-               ret = PTR_ERR(ctrl);
+       file = filp_open(subsys->passthru_ctrl_path, O_RDWR, 0);
+       if (IS_ERR(file)) {
+               ret = PTR_ERR(file);
+               goto out_unlock;
+       }
+
+       ctrl = nvme_ctrl_from_file(file);
+       if (!ctrl) {
                pr_err("failed to open nvme controller %s\n",
                       subsys->passthru_ctrl_path);
 
-               goto out_unlock;
+               goto out_put_file;
        }
 
        old = xa_cmpxchg(&passthru_subsystems, ctrl->cntlid, NULL,
                         subsys, GFP_KERNEL);
        if (xa_is_err(old)) {
                ret = xa_err(old);
-               goto out_put_ctrl;
+               goto out_put_file;
        }
 
        if (old)
-               goto out_put_ctrl;
+               goto out_put_file;
 
        subsys->passthru_ctrl = ctrl;
        subsys->ver = ctrl->vs;
@@ -507,12 +538,12 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys)
                        NVME_TERTIARY(subsys->ver));
                subsys->ver = NVME_VS(1, 2, 1);
        }
+       nvme_get_ctrl(ctrl);
+       __module_get(subsys->passthru_ctrl->ops->module);
+       ret = 0;
 
-       mutex_unlock(&subsys->lock);
-       return 0;
-
-out_put_ctrl:
-       nvme_put_ctrl(ctrl);
+out_put_file:
+       filp_close(file, NULL);
 out_unlock:
        mutex_unlock(&subsys->lock);
        return ret;
@@ -522,6 +553,7 @@ static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys)
 {
        if (subsys->passthru_ctrl) {
                xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid);
+               module_put(subsys->passthru_ctrl->ops->module);
                nvme_put_ctrl(subsys->passthru_ctrl);
        }
        subsys->passthru_ctrl = NULL;
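Beyond the blk_mq_free_request() conversion, the stricter non-SGL flags check, and the ZNS identify CNS values, the largest passthru.c change reworks controller lookup: the configfs path is resolved with filp_open() plus nvme_ctrl_from_file(), and only after every failure point has passed does the code pin a controller reference (nvme_get_ctrl()) and the transport driver's module (__module_get()), which __nvmet_passthru_ctrl_disable() later releases in reverse. A hedged sketch of that pin-through-a-file pattern — struct provider, provider_from_file(), provider_ref() and the owner field are hypothetical stand-ins, not real APIs:

    #include <linux/fs.h>
    #include <linux/module.h>
    #include <linux/err.h>

    static struct provider *provider_pin(const char *path)
    {
            struct file *file;
            struct provider *p;

            file = filp_open(path, O_RDWR, 0);
            if (IS_ERR(file))
                    return ERR_CAST(file);

            p = provider_from_file(file);           /* hypothetical lookup */
            if (p) {
                    provider_ref(p);                /* hypothetical refcount */
                    __module_get(p->owner);         /* keep driver loaded */
            }

            /* The file was only a lookup handle; the refs outlive it. */
            filp_close(file, NULL);
            return p ?: ERR_PTR(-EINVAL);
    }

Taking the module reference last is what keeps the error path a plain filp_close() with nothing else to unwind.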
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 3ccb59260b4a..ae6620489457 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -1758,7 +1758,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
                        schedule_delayed_work(&port->repair_work, 0);
                        break;
                }
-               /* FALLTHROUGH */
+               fallthrough;
        case RDMA_CM_EVENT_DISCONNECTED:
        case RDMA_CM_EVENT_TIMEWAIT_EXIT:
                nvmet_rdma_queue_disconnect(queue);
@@ -1769,7 +1769,7 @@ static int nvmet_rdma_cm_handler(struct rdma_cm_id *cm_id,
        case RDMA_CM_EVENT_REJECTED:
                pr_debug("Connection rejected: %s\n",
                         rdma_reject_msg(cm_id, event->status));
-               /* FALLTHROUGH */
+               fallthrough;
        case RDMA_CM_EVENT_UNREACHABLE:
        case RDMA_CM_EVENT_CONNECT_ERROR:
                nvmet_rdma_queue_connect_fail(cm_id, queue);
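The two rdma.c hunks belong to the same treewide cleanup seen in core.c and fcloop.c: /* FALLTHROUGH */ comments become the fallthrough pseudo-keyword from <linux/compiler_attributes.h>. Unlike a comment, fallthrough; expands to __attribute__((__fallthrough__)) on compilers that support it (and a no-op otherwise), so -Wimplicit-fallthrough can distinguish intentional fall-through from a forgotten break. A minimal illustration — dispatch, OP_PREPARE/OP_RUN and the helpers are hypothetical:

    static void dispatch(int op)
    {
            switch (op) {
            case OP_PREPARE:
                    prepare();      /* hypothetical helper */
                    fallthrough;    /* intentional: PREPARE also runs */
            case OP_RUN:
                    run();          /* hypothetical helper */
                    break;
            default:
                    break;
            }
    }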
diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c
index 9eda91162fe4..dc1f0f647189 100644
--- a/drivers/nvme/target/tcp.c
+++ b/drivers/nvme/target/tcp.c
@@ -94,7 +94,6 @@ struct nvmet_tcp_queue {
        struct socket           *sock;
        struct nvmet_tcp_port   *port;
        struct work_struct      io_work;
-       int                     cpu;
        struct nvmet_cq         nvme_cq;
        struct nvmet_sq         nvme_sq;
 
@@ -144,7 +143,6 @@ struct nvmet_tcp_port {
        struct work_struct      accept_work;
        struct nvmet_port       *nport;
        struct sockaddr_storage addr;
-       int                     last_cpu;
        void (*data_ready)(struct sock *);
 };
 
@@ -160,6 +158,11 @@ static void nvmet_tcp_finish_cmd(struct nvmet_tcp_cmd *cmd);
 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue,
                struct nvmet_tcp_cmd *cmd)
 {
+       if (unlikely(!queue->nr_cmds)) {
+               /* We didn't allocate cmds yet, send 0xffff */
+               return USHRT_MAX;
+       }
+
        return cmd - queue->cmds;
 }
 
@@ -214,6 +217,11 @@ static inline void nvmet_tcp_put_cmd(struct nvmet_tcp_cmd *cmd)
        list_add_tail(&cmd->entry, &cmd->queue->free_list);
 }
 
+static inline int queue_cpu(struct nvmet_tcp_queue *queue)
+{
+       return queue->sock->sk->sk_incoming_cpu;
+}
+
 static inline u8 nvmet_tcp_hdgst_len(struct nvmet_tcp_queue *queue)
 {
        return queue->hdr_digest ? NVME_TCP_DIGEST_LENGTH : 0;
@@ -501,7 +509,7 @@ static void nvmet_tcp_queue_response(struct nvmet_req *req)
        struct nvmet_tcp_queue  *queue = cmd->queue;
 
        llist_add(&cmd->lentry, &queue->resp_list);
-       queue_work_on(cmd->queue->cpu, nvmet_tcp_wq, &cmd->queue->io_work);
+       queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &cmd->queue->io_work);
 }
 
 static int nvmet_try_send_data_pdu(struct nvmet_tcp_cmd *cmd)
@@ -866,7 +874,10 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
        struct nvme_tcp_data_pdu *data = &queue->pdu.data;
        struct nvmet_tcp_cmd *cmd;
 
-       cmd = &queue->cmds[data->ttag];
+       if (likely(queue->nr_cmds))
+               cmd = &queue->cmds[data->ttag];
+       else
+               cmd = &queue->connect;
 
        if (le32_to_cpu(data->data_offset) != cmd->rbytes_done) {
                pr_err("ttag %u unexpected data offset %u (expected %u)\n",
@@ -1215,7 +1226,7 @@ static void nvmet_tcp_io_work(struct work_struct *w)
         * We exahusted our budget, requeue our selves
         */
        if (pending)
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 }
 
 static int nvmet_tcp_alloc_cmd(struct nvmet_tcp_queue *queue,
@@ -1375,7 +1386,7 @@ static void nvmet_tcp_data_ready(struct sock *sk)
        read_lock_bh(&sk->sk_callback_lock);
        queue = sk->sk_user_data;
        if (likely(queue))
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
@@ -1395,7 +1406,7 @@ static void nvmet_tcp_write_space(struct sock *sk)
 
        if (sk_stream_is_writeable(sk)) {
                clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
-               queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+               queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
        }
 out:
        read_unlock_bh(&sk->sk_callback_lock);
@@ -1504,9 +1515,6 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
        if (ret)
                goto out_free_connect;
 
-       port->last_cpu = cpumask_next_wrap(port->last_cpu,
-                               cpu_online_mask, -1, false);
-       queue->cpu = port->last_cpu;
        nvmet_prepare_receive_pdu(queue);
 
        mutex_lock(&nvmet_tcp_queue_mutex);
@@ -1517,7 +1525,7 @@ static int nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port,
        if (ret)
                goto out_destroy_sq;
 
-       queue_work_on(queue->cpu, nvmet_tcp_wq, &queue->io_work);
+       queue_work_on(queue_cpu(queue), nvmet_tcp_wq, &queue->io_work);
 
        return 0;
 out_destroy_sq:
@@ -1604,7 +1612,6 @@ static int nvmet_tcp_add_port(struct nvmet_port *nport)
        }
 
        port->nport = nport;
-       port->last_cpu = -1;
        INIT_WORK(&port->accept_work, nvmet_tcp_accept_work);
        if (port->nport->inline_data_size < 0)
                port->nport->inline_data_size = NVMET_TCP_DEF_INLINE_DATA_SIZE;
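The tcp.c changes retire the per-queue CPU picked round-robin at allocation time (port->last_cpu / queue->cpu) in favor of reading the socket's sk_incoming_cpu every time work is queued, so io_work runs on whichever CPU last handled the connection's RX softirq; the nr_cmds checks alongside let the ttag machinery cope with data arriving before the command array exists. A sketch of the scheduling half — struct my_queue, my_wq and my_queue_kick are hypothetical stand-ins for the nvmet-tcp types:

    #include <net/sock.h>
    #include <linux/workqueue.h>

    struct my_queue {
            struct socket           *sock;
            struct work_struct      io_work;
    };

    static struct workqueue_struct *my_wq;  /* allocated elsewhere */

    static void my_queue_kick(struct my_queue *queue)
    {
            /* sk_incoming_cpu tracks the CPU that last processed incoming
             * packets for this socket, so the work stays cache-hot and
             * adapts if IRQ affinity changes at runtime. */
            queue_work_on(queue->sock->sk->sk_incoming_cpu, my_wq,
                          &queue->io_work);
    }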