From 5a1e59533380a3fd04593e4ab2d4633ebf7745c1 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 22 Feb 2018 07:24:08 -0800
Subject: nvme-fabrics: don't check for non-NULL module in nvmf_register_transport

THIS_MODULE evaluates to NULL when used from code built into the kernel,
thus breaking built-in transport modules. Remove the bogus check.

Fixes: 0de5cd36 ("nvme-fabrics: protect against module unload during create_ctrl")
Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Signed-off-by: Keith Busch
---
 drivers/nvme/host/fabrics.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'drivers/nvme/host')

diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 5dd4ceefed8f..a1c58e35075e 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -493,7 +493,7 @@ EXPORT_SYMBOL_GPL(nvmf_should_reconnect);
  */
 int nvmf_register_transport(struct nvmf_transport_ops *ops)
 {
-	if (!ops->create_ctrl || !ops->module)
+	if (!ops->create_ctrl)
 		return -EINVAL;
 
 	down_write(&nvmf_transports_rwsem);
--
cgit v1.2.3

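For context on why the removed test was wrong: THIS_MODULE is only a real
struct module pointer when the surrounding code is compiled as a loadable
module; for built-in code it expands to a NULL pointer, so every built-in
fabrics transport failed the check. The sketch below is illustrative, not
upstream code: the THIS_MODULE definitions are paraphrased from
<linux/export.h>, while the transport name "example" and the functions
example_create_ctrl()/example_transport_init() are made up for this note.

  /* <linux/export.h>, paraphrased: */
  #ifdef MODULE
  extern struct module __this_module;
  #define THIS_MODULE (&__this_module)
  #else
  #define THIS_MODULE ((struct module *)0)   /* NULL for built-in code */
  #endif

  /* Hypothetical transport registering itself with the fabrics core: */
  static struct nvme_ctrl *example_create_ctrl(struct device *dev,
                struct nvmf_ctrl_options *opts);

  static struct nvmf_transport_ops example_transport_ops = {
        .name           = "example",
        .module         = THIS_MODULE,  /* NULL when the driver is built in */
        .create_ctrl    = example_create_ctrl,
  };

  static int __init example_transport_init(void)
  {
        /* Before this fix, a built-in transport got -EINVAL here. */
        return nvmf_register_transport(&example_transport_ops);
  }

The module reference taken while creating a controller (the point of the
Fixes: commit above) still behaves correctly for built-in transports, since
try_module_get() treats a NULL module as always available.
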
From 0d30992395b1ed0e006960de1651b44cd51be791 Mon Sep 17 00:00:00 2001
From: Christoph Hellwig
Date: Thu, 22 Feb 2018 07:24:09 -0800
Subject: nvme-rdma: use blk_rq_payload_bytes instead of blk_rq_bytes

blk_rq_bytes does the wrong thing for special payloads like discards and
might cause the driver to not set up a SGL.

Signed-off-by: Christoph Hellwig
Reviewed-by: Sagi Grimberg
Reviewed-by: Johannes Thumshirn
Signed-off-by: Keith Busch
---
 drivers/nvme/host/rdma.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/nvme/host')

diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2bc059f7d73c..acc9eb21c242 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1051,7 +1051,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	struct nvme_rdma_device *dev = queue->device;
 	struct ib_device *ibdev = dev->dev;
 
-	if (!blk_rq_bytes(rq))
+	if (!blk_rq_payload_bytes(rq))
 		return;
 
 	if (req->mr) {
@@ -1166,7 +1166,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
-	if (!blk_rq_bytes(rq))
+	if (!blk_rq_payload_bytes(rq))
 		return nvme_rdma_set_sg_null(c);
 
 	req->sg_table.sgl = req->first_sgl;
--
cgit v1.2.3

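The distinction between the two helpers is the whole point of the patch
above: blk_rq_bytes() reports the size of the range the request operates
on, while blk_rq_payload_bytes() reports how much data the driver actually
has to transfer, and for a discard built with a special payload the two
differ. The helpers, abridged from include/linux/blkdev.h as of this era
(comments are mine):

  /* Logical size of the request: the full range it operates on. */
  static inline unsigned int blk_rq_bytes(const struct request *rq)
  {
        return rq->__data_len;
  }

  /*
   * Bytes the driver actually transfers: a request flagged with
   * RQF_SPECIAL_PAYLOAD (e.g. an NVMe discard carrying its DSM range
   * descriptor) only moves the single bvec in special_vec, not the
   * range reported by blk_rq_bytes().
   */
  static inline unsigned int blk_rq_payload_bytes(struct request *rq)
  {
        if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
                return rq->special_vec.bv_len;
        return blk_rq_bytes(rq);
  }

So the zero-length check in nvme_rdma_map_data() now asks "is there
anything to transfer?" rather than "does the request cover any bytes?",
which is the question the SGL setup actually needs answered.
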
From f25a2dfc20e3a3ed8fe6618c331799dd7bd01190 Mon Sep 17 00:00:00 2001
From: Jianchao Wang
Date: Thu, 15 Feb 2018 19:13:41 +0800
Subject: nvme-pci: Fix nvme queue cleanup if IRQ setup fails

This patch fixes nvme queue cleanup if requesting an IRQ handler for the
queue's vector fails. It does this by resetting the cq_vector to the
uninitialized value of -1 so it is ignored for a controller reset.

Signed-off-by: Jianchao Wang
[changelog updates, removed misc whitespace changes]
Signed-off-by: Keith Busch
---
 drivers/nvme/host/pci.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

(limited to 'drivers/nvme/host')

diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 6fe7af00a1f4..022b070e60b7 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1452,7 +1452,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	nvmeq->cq_vector = qid - 1;
 	result = adapter_alloc_cq(dev, qid, nvmeq);
 	if (result < 0)
-		return result;
+		goto release_vector;
 
 	result = adapter_alloc_sq(dev, qid, nvmeq);
 	if (result < 0)
@@ -1466,9 +1466,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
 	return result;
 
  release_sq:
+	dev->online_queues--;
 	adapter_delete_sq(dev, qid);
  release_cq:
 	adapter_delete_cq(dev, qid);
+ release_vector:
+	nvmeq->cq_vector = -1;
 	return result;
 }
 
--
cgit v1.2.3

From 9bd82b1a4418d9b7db000bf557ed608f2872b7c9 Mon Sep 17 00:00:00 2001
From: Baegjae Sung
Date: Wed, 28 Feb 2018 16:06:04 +0900
Subject: nvme-multipath: fix sysfs dangerously created links

If multipathing is enabled, each NVMe subsystem creates a head
namespace (e.g., nvme0n1) and multiple private namespaces
(e.g., nvme0c0n1 and nvme0c1n1) in sysfs. When creating links for
private namespaces, links of head namespace are used, so the
namespace creation order must be followed (e.g., nvme0n1 -> nvme0c1n1).
If the order is not followed, links of sysfs will be incomplete or
kernel panic will occur.

The kernel panic was:

  kernel BUG at fs/sysfs/symlink.c:27!
  Call Trace:
    nvme_mpath_add_disk_links+0x5d/0x80 [nvme_core]
    nvme_validate_ns+0x5c2/0x850 [nvme_core]
    nvme_scan_work+0x1af/0x2d0 [nvme_core]

Correct order
  Context A     Context B
  nvme0n1
  nvme0c0n1
                nvme0c1n1

Incorrect order
  Context A     Context B
                nvme0c1n1
  nvme0n1
  nvme0c0n1

The nvme_mpath_add_disk (for creating head namespace) is called just
before the nvme_mpath_add_disk_links (for creating private namespaces).
In nvme_mpath_add_disk, the first context acquires the lock of subsystem
and creates a head namespace, and other contexts do nothing by checking
GENHD_FL_UP of a head namespace after waiting to acquire the lock. We
verified the code with or without multipathing using three vendors of
dual-port NVMe SSDs.

Signed-off-by: Baegjae Sung
Reviewed-by: Christoph Hellwig
Signed-off-by: Keith Busch
---
 drivers/nvme/host/core.c      | 12 +++---------
 drivers/nvme/host/multipath.c | 15 ++++++++++-----
 2 files changed, 13 insertions(+), 14 deletions(-)

(limited to 'drivers/nvme/host')

diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index f431c32774f3..6088ea13a6bf 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2835,7 +2835,7 @@ out:
 }
 
 static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
-		struct nvme_id_ns *id, bool *new)
+		struct nvme_id_ns *id)
 {
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	bool is_shared = id->nmic & (1 << 0);
@@ -2851,8 +2851,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 			ret = PTR_ERR(head);
 			goto out_unlock;
 		}
-
-		*new = true;
 	} else {
 		struct nvme_ns_ids ids;
 
@@ -2864,8 +2862,6 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid,
 			ret = -EINVAL;
 			goto out_unlock;
 		}
-
-		*new = false;
 	}
 
 	list_add_tail(&ns->siblings, &head->list);
@@ -2936,7 +2932,6 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	struct nvme_id_ns *id;
 	char disk_name[DISK_NAME_LEN];
 	int node = dev_to_node(ctrl->dev), flags = GENHD_FL_EXT_DEVT;
-	bool new = true;
 
 	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
 	if (!ns)
@@ -2962,7 +2957,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 	if (id->ncap == 0)
 		goto out_free_id;
 
-	if (nvme_init_ns_head(ns, nsid, id, &new))
+	if (nvme_init_ns_head(ns, nsid, id))
 		goto out_free_id;
 
 	nvme_setup_streams_ns(ctrl, ns);
@@ -3028,8 +3023,7 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
 		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
 			ns->disk->disk_name);
 
-	if (new)
-		nvme_mpath_add_disk(ns->head);
+	nvme_mpath_add_disk(ns->head);
 	nvme_mpath_add_disk_links(ns);
 	return;
  out_unlink_ns:
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 3b211d9e58b8..b7e5c6db4d92 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -198,11 +198,16 @@ void nvme_mpath_add_disk(struct nvme_ns_head *head)
 {
 	if (!head->disk)
 		return;
-	device_add_disk(&head->subsys->dev, head->disk);
-	if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
-			&nvme_ns_id_attr_group))
-		pr_warn("%s: failed to create sysfs group for identification\n",
-			head->disk->disk_name);
+
+	mutex_lock(&head->subsys->lock);
+	if (!(head->disk->flags & GENHD_FL_UP)) {
+		device_add_disk(&head->subsys->dev, head->disk);
+		if (sysfs_create_group(&disk_to_dev(head->disk)->kobj,
+				&nvme_ns_id_attr_group))
+			pr_warn("%s: failed to create sysfs group for identification\n",
+				head->disk->disk_name);
+	}
+	mutex_unlock(&head->subsys->lock);
 }
 
 void nvme_mpath_add_disk_links(struct nvme_ns *ns)
--
cgit v1.2.3

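Returning to the nvme-pci patch above (queue cleanup after a failed IRQ
request): the -1 sentinel is sufficient because the teardown paths run
during a controller reset already treat cq_vector == -1 as "this queue was
never brought up" and skip freeing an IRQ that was never requested. The
sketch below is a paraphrase of the shape of nvme_suspend_queue() in
drivers/nvme/host/pci.c of this period; the _sketch name and the details
shown are illustrative, not the exact upstream code. The added
dev->online_queues-- in the release_sq path likewise undoes the
online_queues increment done when the queue was initialized just before
the IRQ was requested.

  /*
   * Paraphrased consumer of the -1 sentinel: a queue whose cq_vector is
   * still -1 has nothing live to tear down.
   */
  static int nvme_suspend_queue_sketch(struct nvme_queue *nvmeq)
  {
        int vector;

        spin_lock_irq(&nvmeq->q_lock);
        if (nvmeq->cq_vector == -1) {
                /* queue creation failed part-way: no IRQ to free */
                spin_unlock_irq(&nvmeq->q_lock);
                return 1;
        }
        vector = nvmeq->cq_vector;
        nvmeq->cq_vector = -1;          /* mark it down for later passes */
        spin_unlock_irq(&nvmeq->q_lock);

        free_irq(pci_irq_vector(to_pci_dev(nvmeq->dev->dev), vector), nvmeq);
        return 0;
  }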