Diffstat (limited to 'drivers/nvme')
-rw-r--r--  drivers/nvme/host/core.c       12
-rw-r--r--  drivers/nvme/host/fc.c         75
-rw-r--r--  drivers/nvme/host/multipath.c   1
-rw-r--r--  drivers/nvme/host/nvme.h        3
-rw-r--r--  drivers/nvme/host/rdma.c        2
-rw-r--r--  drivers/nvme/target/core.c      2
-rw-r--r--  drivers/nvme/target/rdma.c     19

7 files changed, 83 insertions(+), 31 deletions(-)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2e65be8b1387..3cf1b773158e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
if (ns->ndev)
nvme_nvm_update_nvm_info(ns);
#ifdef CONFIG_NVME_MULTIPATH
- if (ns->head->disk)
+ if (ns->head->disk) {
nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+ }
#endif
}
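The change above keeps the multipath head disk's queue limits in sync with the underlying path whenever a namespace is revalidated; it pairs with the blk_set_stacking_limits() addition in multipath.c further down. A minimal sketch of the stacking pattern, using hypothetical queue names in place of ns->head->disk->queue and ns->queue:

    #include <linux/blkdev.h>

    /*
     * Sketch: fold a bottom (path) queue's limits into the top (head)
     * queue, as the multipath head disk does on revalidate. my_head_q
     * and my_path_q are hypothetical stand-ins.
     */
    static void my_stack_limits(struct request_queue *my_head_q,
                                struct request_queue *my_path_q)
    {
        /* start from the permissive defaults meant for stacking drivers */
        blk_set_stacking_limits(&my_head_q->limits);

        /* constrain the head queue by the path queue's limits */
        blk_queue_stack_limits(my_head_q, my_path_q);
    }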
@@ -3312,6 +3314,9 @@ void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
struct nvme_ns *ns, *next;
LIST_HEAD(ns_list);
+ /* prevent racing with ns scanning */
+ flush_work(&ctrl->scan_work);
+
/*
* The dead state indicates the controller was not gracefully
* disconnected. In that case, we won't be able to flush any data while
@@ -3474,7 +3479,6 @@ void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
nvme_mpath_stop(ctrl);
nvme_stop_keep_alive(ctrl);
flush_work(&ctrl->async_event_work);
- flush_work(&ctrl->scan_work);
cancel_work_sync(&ctrl->fw_act_work);
if (ctrl->ops->stop_ctrl)
ctrl->ops->stop_ctrl(ctrl);
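Moving the flush out of nvme_stop_ctrl() and into nvme_remove_namespaces() avoids flushing scan work while I/O the scan may be blocked on has not yet been failed, and still guarantees scanning cannot race with namespace removal. A minimal sketch of the flush-before-teardown ordering, with hypothetical names:

    #include <linux/workqueue.h>

    struct my_ctrl {
        struct work_struct scan_work;
        /* ... namespace list, locking, ... */
    };

    static void my_remove_namespaces(struct my_ctrl *c)
    {
        /*
         * Wait for any in-flight scan to finish before walking the
         * namespace list; otherwise the scan could add entries while
         * they are being torn down.
         */
        flush_work(&c->scan_work);

        /* ... splice the list and remove each namespace ... */
    }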
@@ -3583,7 +3587,7 @@ int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
return 0;
out_free_name:
- kfree_const(dev->kobj.name);
+ kfree_const(ctrl->device->kobj.name);
out_release_instance:
ida_simple_remove(&nvme_instance_ida, ctrl->instance);
out:
@@ -3605,7 +3609,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
down_read(&ctrl->namespaces_rwsem);
/* Forcibly unquiesce queues to avoid blocking dispatch */
- if (ctrl->admin_q)
+ if (ctrl->admin_q && !blk_queue_dying(ctrl->admin_q))
blk_mq_unquiesce_queue(ctrl->admin_q);
list_for_each_entry(ns, &ctrl->namespaces, list)
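The blk_queue_dying() test guards against unquiescing an admin queue whose cleanup has already begun, where restarting dispatch could touch freed resources. A one-function sketch of the guard:

    #include <linux/blkdev.h>
    #include <linux/blk-mq.h>

    /* Sketch: only restart dispatch on a queue that is still alive. */
    static void my_unquiesce_if_alive(struct request_queue *q)
    {
        if (q && !blk_queue_dying(q))
            blk_mq_unquiesce_queue(q);
    }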
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 0b70c8bab045..feb86b59170e 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -152,6 +152,7 @@ struct nvme_fc_ctrl {
bool ioq_live;
bool assoc_active;
+ atomic_t err_work_active;
u64 association_id;
struct list_head ctrl_list; /* rport->ctrl_list */
@@ -160,6 +161,7 @@ struct nvme_fc_ctrl {
struct blk_mq_tag_set tag_set;
struct delayed_work connect_work;
+ struct work_struct err_work;
struct kref ref;
u32 flags;
@@ -1531,6 +1533,10 @@ nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
int i;
+ /* ensure we've initialized the ops once */
+ if (!(aen_op->flags & FCOP_FLAGS_AEN))
+ return;
+
for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
__nvme_fc_abort_op(ctrl, aen_op);
}
@@ -1746,12 +1752,12 @@ nvme_fc_init_request(struct blk_mq_tag_set *set, struct request *rq,
struct nvme_fc_queue *queue = &ctrl->queues[queue_idx];
int res;
- nvme_req(rq)->ctrl = &ctrl->ctrl;
res = __nvme_fc_init_request(ctrl, queue, &op->op, rq, queue->rqcnt++);
if (res)
return res;
op->op.fcp_req.first_sgl = &op->sgl[0];
op->op.fcp_req.private = &op->priv[0];
+ nvme_req(rq)->ctrl = &ctrl->ctrl;
return res;
}
@@ -2049,7 +2055,25 @@ nvme_fc_nvme_ctrl_freed(struct nvme_ctrl *nctrl)
static void
nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg)
{
- /* only proceed if in LIVE state - e.g. on first error */
+ int active;
+
+ /*
+ * if an error (io timeout, etc.) occurs while (re)connecting,
+ * it's an error on creating the new association.
+ * Start the error recovery thread if it hasn't already
+ * been started. It is expected there could be multiple
+ * ios hitting this path before things are cleaned up.
+ */
+ if (ctrl->ctrl.state == NVME_CTRL_CONNECTING) {
+ active = atomic_xchg(&ctrl->err_work_active, 1);
+ if (!active && !schedule_work(&ctrl->err_work)) {
+ atomic_set(&ctrl->err_work_active, 0);
+ WARN_ON(1);
+ }
+ return;
+ }
+
+ /* Otherwise, only proceed if in LIVE state - e.g. on first error */
if (ctrl->ctrl.state != NVME_CTRL_LIVE)
return;
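The atomic_xchg() acts as a claim flag: of the many I/Os that can fail while the controller is CONNECTING, only the first observes the old value 0 and schedules err_work; if schedule_work() nevertheless returns false the work was somehow already pending, so the claim is released and the WARN flags the inconsistency. A self-contained sketch of this run-once pattern, with hypothetical names:

    #include <linux/atomic.h>
    #include <linux/bug.h>
    #include <linux/workqueue.h>

    struct my_ctrl {
        atomic_t           err_active;
        struct work_struct err_work;
    };

    /* Sketch: many callers may race here; only one schedules the work. */
    static void my_schedule_error_recovery(struct my_ctrl *c)
    {
        /* atomic_xchg() returns the old value: 0 means we won the claim */
        if (atomic_xchg(&c->err_active, 1))
            return;

        if (!schedule_work(&c->err_work)) {
            /* already queued (unexpected): give the claim back */
            atomic_set(&c->err_active, 0);
            WARN_ON(1);
        }
    }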
@@ -2814,6 +2838,7 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
{
struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
+ cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
/*
* kill the association on the link side. this will block
@@ -2866,23 +2891,30 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
}
static void
-nvme_fc_reset_ctrl_work(struct work_struct *work)
+__nvme_fc_terminate_io(struct nvme_fc_ctrl *ctrl)
{
- struct nvme_fc_ctrl *ctrl =
- container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
- int ret;
-
- nvme_stop_ctrl(&ctrl->ctrl);
+ nvme_stop_keep_alive(&ctrl->ctrl);
/* will block waiting for io to terminate */
nvme_fc_delete_association(ctrl);
- if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
+ if (ctrl->ctrl.state != NVME_CTRL_CONNECTING &&
+ !nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING))
dev_err(ctrl->ctrl.device,
"NVME-FC{%d}: error_recovery: Couldn't change state "
"to CONNECTING\n", ctrl->cnum);
- return;
- }
+}
+
+static void
+nvme_fc_reset_ctrl_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, ctrl.reset_work);
+ int ret;
+
+ __nvme_fc_terminate_io(ctrl);
+
+ nvme_stop_ctrl(&ctrl->ctrl);
if (ctrl->rport->remoteport.port_state == FC_OBJSTATE_ONLINE)
ret = nvme_fc_create_association(ctrl);
@@ -2897,6 +2929,24 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
ctrl->cnum);
}
+static void
+nvme_fc_connect_err_work(struct work_struct *work)
+{
+ struct nvme_fc_ctrl *ctrl =
+ container_of(work, struct nvme_fc_ctrl, err_work);
+
+ __nvme_fc_terminate_io(ctrl);
+
+ atomic_set(&ctrl->err_work_active, 0);
+
+ /*
+ * Rescheduling the connection after recovering
+ * from the io error is left to the reconnect work
+ * item, which should have stalled waiting on the
+ * io that hit the error that scheduled this work.
+ */
+}
+
static const struct nvme_ctrl_ops nvme_fc_ctrl_ops = {
.name = "fc",
.module = THIS_MODULE,
@@ -3007,6 +3057,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
ctrl->cnum = idx;
ctrl->ioq_live = false;
ctrl->assoc_active = false;
+ atomic_set(&ctrl->err_work_active, 0);
init_waitqueue_head(&ctrl->ioabort_wait);
get_device(ctrl->dev);
@@ -3014,6 +3065,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
INIT_WORK(&ctrl->ctrl.reset_work, nvme_fc_reset_ctrl_work);
INIT_DELAYED_WORK(&ctrl->connect_work, nvme_fc_connect_ctrl_work);
+ INIT_WORK(&ctrl->err_work, nvme_fc_connect_err_work);
spin_lock_init(&ctrl->lock);
/* io queue count */
@@ -3103,6 +3155,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
fail_ctrl:
nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING);
cancel_work_sync(&ctrl->ctrl.reset_work);
+ cancel_work_sync(&ctrl->err_work);
cancel_delayed_work_sync(&ctrl->connect_work);
ctrl->ctrl.opts = NULL;
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 5e3cc8c59a39..9901afd804ce 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
/* set to a default value of 512 until the disk is validated */
blk_queue_logical_block_size(q, 512);
+ blk_set_stacking_limits(&q->limits);
/* we need to propagate up the VWC settings */
if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index cee79cb388af..081cbdcce880 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -531,6 +531,9 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
static inline int nvme_mpath_init(struct nvme_ctrl *ctrl,
struct nvme_id_ctrl *id)
{
+ if (ctrl->subsys->cmic & (1 << 3))
+ dev_warn(ctrl->device,
+"Please enable CONFIG_NVME_MULTIPATH for full support of multi-port devices.\n");
return 0;
}
static inline void nvme_mpath_uninit(struct nvme_ctrl *ctrl)
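CMIC is a capability byte in the Identify Controller data; the stub above tests bit 3 (ANA reporting, present on multi-port subsystems) so that kernels built without CONFIG_NVME_MULTIPATH at least warn instead of silently exposing each path as a separate disk. A trivial sketch of the bit test, with a hypothetical helper name:

    #include <linux/types.h>

    /* Sketch: bit 3 of the Identify Controller CMIC byte is the
     * ANA-reporting bit tested by the stub above. */
    static bool my_cmic_reports_ana(u8 cmic)
    {
        return cmic & (1 << 3);
    }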
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index d181cafedc58..ab6ec7295bf9 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -184,6 +184,7 @@ static int nvme_rdma_alloc_qe(struct ib_device *ibdev, struct nvme_rdma_qe *qe,
qe->dma = ib_dma_map_single(ibdev, qe->data, capsule_size, dir);
if (ib_dma_mapping_error(ibdev, qe->dma)) {
kfree(qe->data);
+ qe->data = NULL;
return -ENOMEM;
}
@@ -823,6 +824,7 @@ out_free_tagset:
out_free_async_qe:
nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
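Both rdma.c hunks apply the same defense: clear the pointer at the point of (or immediately after) the free, so a later unwind path that frees unconditionally hits the kfree(NULL) no-op rather than double-freeing the async event buffer. A minimal sketch, names hypothetical:

    #include <linux/slab.h>

    struct my_qe {
        void *data;
        /* ... dma handle, size, ... */
    };

    /* Sketch: kfree(NULL) is a no-op, so clearing the pointer makes a
     * second free attempt harmless. */
    static void my_free_qe(struct my_qe *qe)
    {
        kfree(qe->data);
        qe->data = NULL;
    }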
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index f4efe289dc7b..a5f9bbce863f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
struct pci_dev *p2p_dev;
int ret;
- if (!ctrl->p2p_client)
+ if (!ctrl->p2p_client || !ns->use_p2pmem)
return;
if (ns->p2p_dev) {
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
int inline_page_count;
};
-static struct workqueue_struct *nvmet_rdma_delete_wq;
static bool nvmet_rdma_use_srq;
module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
if (queue->host_qid == 0) {
/* Let inflight controller teardown complete */
- flush_workqueue(nvmet_rdma_delete_wq);
+ flush_scheduled_work();
}
ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
if (ret) {
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
/* Destroying rdma_cm id is not needed here */
return 0;
}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
if (disconnect) {
rdma_disconnect(queue->cm_id);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
}
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
mutex_unlock(&nvmet_rdma_queue_mutex);
pr_err("failed to connect queue %d\n", queue->idx);
- queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+ schedule_work(&queue->release_work);
}
/**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
if (ret)
goto err_ib_client;
- nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
- WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
- if (!nvmet_rdma_delete_wq) {
- ret = -ENOMEM;
- goto err_unreg_transport;
- }
-
return 0;
-err_unreg_transport:
- nvmet_unregister_transport(&nvmet_rdma_ops);
err_ib_client:
ib_unregister_client(&nvmet_rdma_ib_client);
return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
static void __exit nvmet_rdma_exit(void)
{
- destroy_workqueue(nvmet_rdma_delete_wq);
nvmet_unregister_transport(&nvmet_rdma_ops);
ib_unregister_client(&nvmet_rdma_ib_client);
WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
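With the private nvmet-rdma-delete-wq gone, release work rides the shared system workqueue: teardown is queued with schedule_work() and admin-queue connects wait for inflight teardown with flush_scheduled_work(), so module init and exit no longer allocate and destroy a dedicated queue. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/workqueue.h>

    struct my_queue {
        struct work_struct release_work;
    };

    /* Sketch: queue release on the shared system workqueue. */
    static void my_queue_disconnect(struct my_queue *q)
    {
        schedule_work(&q->release_work);
    }

    /* Sketch: wait for all previously scheduled work, including any
     * pending release_work, before accepting a new admin connect. */
    static void my_wait_for_teardown(void)
    {
        flush_scheduled_work();
    }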